From 05f19206f4361aff9376d259853ce974c31cf017 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Thu, 24 Sep 2020 09:23:51 -0600 Subject: [PATCH] feat!: migrate to microgenerator (#61) --- .../CONTRIBUTING.rst | 19 - .../google-cloud-python-speech/UPGRADING.md | 146 + .../docs/UPGRADING.md | 1 + .../google-cloud-python-speech/docs/conf.py | 3 +- .../docs/gapic/v1/api.rst | 6 - .../docs/gapic/v1/types.rst | 5 - .../docs/gapic/v1p1beta1/api.rst | 6 - .../docs/gapic/v1p1beta1/types.rst | 5 - .../google-cloud-python-speech/docs/index.rst | 272 +- .../docs/speech_v1/services.rst | 6 + .../docs/speech_v1/types.rst | 5 + .../docs/speech_v1p1beta1/services.rst | 6 + .../docs/speech_v1p1beta1/types.rst | 5 + .../google/cloud/speech/__init__.py | 58 + .../google/cloud/speech/py.typed | 2 + .../google/cloud/speech_v1/__init__.py | 54 +- .../google/cloud/speech_v1/gapic/__init__.py | 0 .../google/cloud/speech_v1/gapic/enums.py | 197 -- .../cloud/speech_v1/gapic/speech_client.py | 410 --- .../speech_v1/gapic/speech_client_config.py | 57 - .../speech_v1/gapic/transports/__init__.py | 0 .../gapic/transports/speech_grpc_transport.py | 162 - .../google/cloud/speech_v1/helpers.py | 25 +- .../google/cloud/speech_v1/proto/__init__.py | 0 .../cloud/speech_v1/proto/cloud_speech_pb2.py | 2805 --------------- .../speech_v1/proto/cloud_speech_pb2_grpc.py | 181 - .../google/cloud/speech_v1/py.typed | 2 + .../{ => speech_v1/services}/__init__.py | 16 +- .../services/speech/__init__.py} | 17 +- .../speech_v1/services/speech/async_client.py | 408 +++ .../cloud/speech_v1/services/speech/client.py | 548 +++ .../services/speech/transports/__init__.py | 36 + .../services/speech/transports/base.py | 181 + .../services/speech/transports/grpc.py | 340 ++ .../speech/transports/grpc_asyncio.py | 344 ++ .../google/cloud/speech_v1/types.py | 56 - .../google/cloud/speech_v1/types/__init__.py | 57 + .../cloud/speech_v1/types/cloud_speech.py | 850 +++++ .../google/cloud/speech_v1p1beta1/__init__.py | 62 +- .../cloud/speech_v1p1beta1/gapic/__init__.py | 0 .../cloud/speech_v1p1beta1/gapic/enums.py | 201 -- .../speech_v1p1beta1/gapic/speech_client.py | 410 --- .../gapic/speech_client_config.py | 57 - .../gapic/transports/__init__.py | 0 .../gapic/transports/speech_grpc_transport.py | 162 - .../cloud/speech_v1p1beta1/proto/__init__.py | 0 .../proto/cloud_speech_pb2.py | 3083 ----------------- .../proto/cloud_speech_pb2_grpc.py | 181 - .../speech_v1p1beta1/proto/resource_pb2.py | 508 --- .../proto/resource_pb2_grpc.py | 3 - .../google/cloud/speech_v1p1beta1/py.typed | 2 + .../speech_v1p1beta1/services/__init__.py | 16 + .../services/speech}/__init__.py | 20 +- .../services/speech/async_client.py | 413 +++ .../services/speech/client.py | 580 ++++ .../services/speech/transports/__init__.py | 36 + .../services/speech/transports/base.py | 181 + .../services/speech/transports/grpc.py | 340 ++ .../speech/transports/grpc_asyncio.py | 344 ++ .../google/cloud/speech_v1p1beta1/types.py | 58 - .../cloud/speech_v1p1beta1/types/__init__.py | 65 + .../speech_v1p1beta1/types/cloud_speech.py | 939 +++++ .../cloud/speech_v1p1beta1/types/resource.py | 157 + packages/google-cloud-python-speech/mypy.ini | 3 + .../google-cloud-python-speech/noxfile.py | 14 +- .../transcribe_streaming_infinite.py | 81 +- .../microphone/transcribe_streaming_mic.py | 46 +- .../transcribe_streaming_mic_test.py | 19 +- .../samples/snippets/beta_snippets.py | 215 +- .../samples/snippets/beta_snippets_test.py | 17 +- 
.../samples/snippets/quickstart.py | 25 +- .../samples/snippets/quickstart_test.py | 2 +- .../snippets/speech_adaptation_beta.py | 8 +- .../snippets/speech_quickstart_beta.py | 7 +- .../samples/snippets/transcribe.py | 49 +- .../samples/snippets/transcribe_async.py | 61 +- .../samples/snippets/transcribe_async_test.py | 12 +- .../snippets/transcribe_auto_punctuation.py | 30 +- .../transcribe_auto_punctuation_test.py | 7 +- .../snippets/transcribe_context_classes.py | 22 +- .../transcribe_context_classes_test.py | 5 +- .../snippets/transcribe_enhanced_model.py | 29 +- .../transcribe_enhanced_model_test.py | 7 +- .../snippets/transcribe_model_selection.py | 69 +- .../transcribe_model_selection_test.py | 12 +- .../snippets/transcribe_multichannel.py | 57 +- .../snippets/transcribe_multichannel_test.py | 15 +- .../transcribe_onprem/transcribe_onprem.py | 19 +- .../samples/snippets/transcribe_streaming.py | 39 +- .../snippets/transcribe_streaming_test.py | 7 +- .../samples/snippets/transcribe_test.py | 11 +- .../snippets/transcribe_word_time_offsets.py | 71 +- .../transcribe_word_time_offsets_test.py | 12 +- .../scripts/decrypt-secrets.sh | 15 +- .../scripts/fixup_speech_v1_keywords.py | 180 + .../fixup_speech_v1p1beta1_keywords.py | 180 + packages/google-cloud-python-speech/setup.py | 20 +- .../google-cloud-python-speech/synth.metadata | 12 +- packages/google-cloud-python-speech/synth.py | 61 +- .../system/gapic/v1/test_system_speech_v1.py | 24 +- .../v1p1beta1/test_system_speech_v1p1beta1.py | 22 +- .../tests/unit/gapic/speech_v1/__init__.py | 1 + .../tests/unit/gapic/speech_v1/test_speech.py | 1130 ++++++ .../unit/gapic/speech_v1p1beta1/__init__.py | 1 + .../gapic/speech_v1p1beta1/test_speech.py | 1181 +++++++ .../unit/gapic/v1/test_speech_client_v1.py | 244 -- .../v1p1beta1/test_speech_client_v1p1beta1.py | 244 -- .../tests/unit/test_helpers.py | 6 +- 108 files changed, 9497 insertions(+), 9904 deletions(-) create mode 100644 packages/google-cloud-python-speech/UPGRADING.md create mode 120000 packages/google-cloud-python-speech/docs/UPGRADING.md delete mode 100644 packages/google-cloud-python-speech/docs/gapic/v1/api.rst delete mode 100644 packages/google-cloud-python-speech/docs/gapic/v1/types.rst delete mode 100644 packages/google-cloud-python-speech/docs/gapic/v1p1beta1/api.rst delete mode 100644 packages/google-cloud-python-speech/docs/gapic/v1p1beta1/types.rst create mode 100644 packages/google-cloud-python-speech/docs/speech_v1/services.rst create mode 100644 packages/google-cloud-python-speech/docs/speech_v1/types.rst create mode 100644 packages/google-cloud-python-speech/docs/speech_v1p1beta1/services.rst create mode 100644 packages/google-cloud-python-speech/docs/speech_v1p1beta1/types.rst create mode 100644 packages/google-cloud-python-speech/google/cloud/speech/__init__.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech/py.typed delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/__init__.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/enums.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/speech_client.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/speech_client_config.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/transports/__init__.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/transports/speech_grpc_transport.py delete mode 
100644 packages/google-cloud-python-speech/google/cloud/speech_v1/proto/__init__.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/proto/cloud_speech_pb2.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/proto/cloud_speech_pb2_grpc.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/py.typed rename packages/google-cloud-python-speech/google/cloud/{ => speech_v1/services}/__init__.py (66%) rename packages/google-cloud-python-speech/google/cloud/{speech.py => speech_v1/services/speech/__init__.py} (70%) create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/async_client.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/client.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/__init__.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/base.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/grpc.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/grpc_asyncio.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/types.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/types/__init__.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1/types/cloud_speech.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/__init__.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/enums.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/speech_client.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/speech_client_config.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/transports/__init__.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/transports/speech_grpc_transport.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/__init__.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/cloud_speech_pb2.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/cloud_speech_pb2_grpc.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/resource_pb2.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/resource_pb2_grpc.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/py.typed create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/__init__.py rename packages/google-cloud-python-speech/google/{ => cloud/speech_v1p1beta1/services/speech}/__init__.py (66%) create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/async_client.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/client.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/__init__.py create mode 100644 
packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/base.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/grpc.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/grpc_asyncio.py delete mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/__init__.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/cloud_speech.py create mode 100644 packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/resource.py create mode 100644 packages/google-cloud-python-speech/mypy.ini create mode 100644 packages/google-cloud-python-speech/scripts/fixup_speech_v1_keywords.py create mode 100644 packages/google-cloud-python-speech/scripts/fixup_speech_v1p1beta1_keywords.py create mode 100644 packages/google-cloud-python-speech/tests/unit/gapic/speech_v1/__init__.py create mode 100644 packages/google-cloud-python-speech/tests/unit/gapic/speech_v1/test_speech.py create mode 100644 packages/google-cloud-python-speech/tests/unit/gapic/speech_v1p1beta1/__init__.py create mode 100644 packages/google-cloud-python-speech/tests/unit/gapic/speech_v1p1beta1/test_speech.py delete mode 100644 packages/google-cloud-python-speech/tests/unit/gapic/v1/test_speech_client_v1.py delete mode 100644 packages/google-cloud-python-speech/tests/unit/gapic/v1p1beta1/test_speech_client_v1p1beta1.py diff --git a/packages/google-cloud-python-speech/CONTRIBUTING.rst b/packages/google-cloud-python-speech/CONTRIBUTING.rst index db90cf3cc2d9..9dbfef9cb144 100644 --- a/packages/google-cloud-python-speech/CONTRIBUTING.rst +++ b/packages/google-cloud-python-speech/CONTRIBUTING.rst @@ -80,25 +80,6 @@ We use `nox `__ to instrument our tests. .. nox: https://pypi.org/project/nox/ -Note on Editable Installs / Develop Mode -======================================== - -- As mentioned previously, using ``setuptools`` in `develop mode`_ - or a ``pip`` `editable install`_ is not possible with this - library. This is because this library uses `namespace packages`_. - For context see `Issue #2316`_ and the relevant `PyPA issue`_. - - Since ``editable`` / ``develop`` mode can't be used, packages - need to be installed directly. Hence your changes to the source - tree don't get incorporated into the **already installed** - package. - -.. _namespace packages: https://www.python.org/dev/peps/pep-0420/ -.. _Issue #2316: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2316 -.. _PyPA issue: https://github.com/pypa/packaging-problems/issues/12 -.. _develop mode: https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode -.. _editable install: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs - ***************************************** I'm getting weird errors... Can you help? 
***************************************** diff --git a/packages/google-cloud-python-speech/UPGRADING.md b/packages/google-cloud-python-speech/UPGRADING.md new file mode 100644 index 000000000000..3d3374945d6b --- /dev/null +++ b/packages/google-cloud-python-speech/UPGRADING.md @@ -0,0 +1,146 @@ +# 2.0.0 Migration Guide + +The 2.0 release of the `google-cloud-speech` client is a significant upgrade based on a [next-gen code generator](https://github.com/googleapis/gapic-generator-python), and includes substantial interface changes. Existing code written for earlier versions of this library will likely require updates to use this version. This document describes the changes that have been made, and what you need to do to update your usage. + +If you experience issues or have questions, please file an [issue](https://github.com/googleapis/python-speech/issues). + +## Supported Python Versions + +> **WARNING**: Breaking change + +The 2.0.0 release requires Python 3.6+. + + +## Method Calls + +> **WARNING**: Breaking change + +Methods expect request objects. We provide scripts that will convert most common use cases. + +* Install the library + +```sh +python3 -m pip install google-cloud-speech +``` + +* The scripts `fixup_speech_v1_keywords.py` and `fixup_speech_v1p1beta1_keywords.py` are shipped with the library. Each expects an input directory (with the code to convert) and an empty destination directory. + +```sh +$ fixup_speech_v1_keywords.py --input-directory .samples/ --output-directory samples/ +``` + +**Before:** +```py +from google.cloud import speech + +client = speech.SpeechClient() + +response = client.recognize(config, audio) +``` + + +**After:** +```py +from google.cloud import speech + +client = speech.SpeechClient() + +request = speech.RecognizeRequest(config=config, audio=audio) +response = client.recognize(request=request) +``` + +### More Details + +In `google-cloud-speech<2.0.0`, parameters required by the API were positional parameters and optional parameters were keyword parameters. + +**Before:** +```py + def recognize( + self, + config, + audio, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): +``` + +In the 2.0.0 release, all methods have a single positional parameter `request`. Method docstrings indicate whether a parameter is required or optional. + +Some methods have additional keyword-only parameters. The available parameters depend on the [`google.api.method_signature` annotation](https://github.com/googleapis/googleapis/blob/3dbeac0d54125b123c8dfd39c774b37473c36944/google/cloud/speech/v1/cloud_speech.proto#L48) specified by the API producer. + + +**After:** +```py + def recognize( + self, + request: cloud_speech.RecognizeRequest = None, + *, + config: cloud_speech.RecognitionConfig = None, + audio: cloud_speech.RecognitionAudio = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cloud_speech.RecognizeResponse: +``` + +> **NOTE:** The `request` parameter and flattened keyword parameters for the API are mutually exclusive. +> Passing both will result in an error. + + +Both of these calls are valid: + +```py +response = client.recognize( + request={ + "config": config, + "audio": audio, + } +) +``` + +```py +response = client.recognize( + config=config, + audio=audio, +) +``` + +Mixing `request` with a flattened keyword argument such as `audio` is invalid.
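+ +This mutual-exclusion check happens client-side, before any request is sent. As a minimal sketch of catching the mistake (an illustration that assumes the generated client raises `ValueError` here, as GAPIC-generated clients typically do; the guide itself does not specify the exception type), the mixed call can be wrapped: + +```py +try: + response = client.recognize( + request={"config": config}, # request object... + audio=audio, # ...mixed with a flattened keyword argument + ) +except ValueError as exc: + print(exc) # raised locally; no request reaches the API +``` + +The plain form of the invalid call: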
Executing this code +will result in an error. + +```py +response = client.recognize( + request={ + "config": config, + }, + audio=audio, +) +``` + + + +## Enums and Types + + +> **WARNING**: Breaking change + +The submodules `enums` and `types` have been removed. + +**Before:** +```py + +from google.cloud import speech + +encoding = speech.enums.RecognitionConfig.AudioEncoding.LINEAR16 +audio = speech.types.RecognitionAudio(content=content) +``` + + +**After:** +```py +from google.cloud import speech + +encoding = speech.RecognitionConfig.AudioEncoding.LINEAR16 +audio = speech.RecognitionAudio(content=content) +``` \ No newline at end of file diff --git a/packages/google-cloud-python-speech/docs/UPGRADING.md b/packages/google-cloud-python-speech/docs/UPGRADING.md new file mode 120000 index 000000000000..01097c8c0fb8 --- /dev/null +++ b/packages/google-cloud-python-speech/docs/UPGRADING.md @@ -0,0 +1 @@ +../UPGRADING.md \ No newline at end of file diff --git a/packages/google-cloud-python-speech/docs/conf.py b/packages/google-cloud-python-speech/docs/conf.py index c6d618f976d6..3f66f69f50d7 100644 --- a/packages/google-cloud-python-speech/docs/conf.py +++ b/packages/google-cloud-python-speech/docs/conf.py @@ -29,7 +29,7 @@ # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" +needs_sphinx = "1.5.5" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -39,6 +39,7 @@ "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx.ext.coverage", + "sphinx.ext.doctest", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", diff --git a/packages/google-cloud-python-speech/docs/gapic/v1/api.rst b/packages/google-cloud-python-speech/docs/gapic/v1/api.rst deleted file mode 100644 index a04f18ad6ba1..000000000000 --- a/packages/google-cloud-python-speech/docs/gapic/v1/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Client for Cloud Speech-to-Text API -=================================== - -.. automodule:: google.cloud.speech_v1 - :members: - :inherited-members: \ No newline at end of file diff --git a/packages/google-cloud-python-speech/docs/gapic/v1/types.rst b/packages/google-cloud-python-speech/docs/gapic/v1/types.rst deleted file mode 100644 index 71a61184ef32..000000000000 --- a/packages/google-cloud-python-speech/docs/gapic/v1/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Types for Cloud Speech-to-Text API Client -========================================= - -.. automodule:: google.cloud.speech_v1.types - :members: \ No newline at end of file diff --git a/packages/google-cloud-python-speech/docs/gapic/v1p1beta1/api.rst b/packages/google-cloud-python-speech/docs/gapic/v1p1beta1/api.rst deleted file mode 100644 index 9493c970c2bc..000000000000 --- a/packages/google-cloud-python-speech/docs/gapic/v1p1beta1/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Client for Cloud Speech-to-Text API -=================================== - -.. 
automodule:: google.cloud.speech_v1p1beta1 - :members: - :inherited-members: \ No newline at end of file diff --git a/packages/google-cloud-python-speech/docs/gapic/v1p1beta1/types.rst b/packages/google-cloud-python-speech/docs/gapic/v1p1beta1/types.rst deleted file mode 100644 index 6ce93d567607..000000000000 --- a/packages/google-cloud-python-speech/docs/gapic/v1p1beta1/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Types for Cloud Speech-to-Text API Client -========================================= - -.. automodule:: google.cloud.speech_v1p1beta1.types - :members: \ No newline at end of file diff --git a/packages/google-cloud-python-speech/docs/index.rst b/packages/google-cloud-python-speech/docs/index.rst index a9aabc984d0a..de7430796564 100644 --- a/packages/google-cloud-python-speech/docs/index.rst +++ b/packages/google-cloud-python-speech/docs/index.rst @@ -2,255 +2,6 @@ .. include:: multiprocessing.rst -Using the Library ------------------ - -Asynchronous Recognition -~~~~~~~~~~~~~~~~~~~~~~~~ - -The :meth:`~.speech_v1.SpeechClient.long_running_recognize` method -sends audio data to the Speech API and initiates a Long Running Operation. - -Using this operation, you can periodically poll for recognition results. -Use asynchronous requests for audio data of any duration up to 80 minutes. - -See: `Speech Asynchronous Recognize`_ - - -.. code-block:: python - - >>> from google.cloud import speech - >>> client = speech.SpeechClient() - >>> audio = speech.types.RecognitionAudio( - ... uri='gs://my-bucket/recording.flac') - >>> config = speech.types.RecognitionConfig( - ... encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, - ... language_code='en-US', - ... sample_rate_hertz=44100) - >>> operation = client.long_running_recognize(config=config, audio=audio) - >>> op_result = operation.result() - >>> for result in op_result.results: - ... for alternative in result.alternatives: - ... print('=' * 20) - ... print(alternative.transcript) - ... print(alternative.confidence) - ==================== - 'how old is the Brooklyn Bridge' - 0.98267895 - - -Synchronous Recognition -~~~~~~~~~~~~~~~~~~~~~~~ - -The :meth:`~.speech_v1.SpeechClient.recognize` method converts speech -data to text and returns alternative text transcriptions. - -This example uses ``language_code='en-GB'`` to better recognize a dialect from -Great Britain. - -.. code-block:: python - - >>> from google.cloud import speech - >>> client = speech.SpeechClient() - >>> audio = speech.types.RecognitionAudio( - ... uri='gs://my-bucket/recording.flac') - >>> config = speech.types.RecognitionConfig( - ... encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, - ... language_code='en-US', - ... sample_rate_hertz=44100) - >>> results = client.recognize(config=config, audio=audio) - >>> for result in results: - ... for alternative in result.alternatives: - ... print('=' * 20) - ... print('transcript: ' + alternative.transcript) - ... print('confidence: ' + str(alternative.confidence)) - ==================== - transcript: Hello, this is a test - confidence: 0.81 - ==================== - transcript: Hello, this is one test - confidence: 0 - -Example of using the profanity filter. - -.. code-block:: python - - >>> from google.cloud import speech - >>> client = speech.SpeechClient() - >>> audio = speech.types.RecognitionAudio( - ... uri='gs://my-bucket/recording.flac') - >>> config = speech.types.RecognitionConfig( - ... encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, - ... language_code='en-US', - ... 
sample_rate_hertz=44100, - ... profanity_filter=True) - >>> results = client.recognize(config=config, audio=audio) - >>> for result in results: - ... for alternative in result.alternatives: - ... print('=' * 20) - ... print('transcript: ' + alternative.transcript) - ... print('confidence: ' + str(alternative.confidence)) - ==================== - transcript: Hello, this is a f****** test - confidence: 0.81 - -Using speech context hints to get better results. This can be used to improve -the accuracy for specific words and phrases. This can also be used to add new -words to the vocabulary of the recognizer. - -.. code-block:: python - - >>> from google.cloud import speech - >>> from google.cloud import speech - >>> client = speech.SpeechClient() - >>> audio = speech.types.RecognitionAudio( - ... uri='gs://my-bucket/recording.flac') - >>> config = speech.types.RecognitionConfig( - ... encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, - ... language_code='en-US', - ... sample_rate_hertz=44100, - ... speech_contexts=[speech.types.SpeechContext( - ... phrases=['hi', 'good afternoon'], - ... )]) - >>> results = client.recognize(config=config, audio=audio) - >>> for result in results: - ... for alternative in result.alternatives: - ... print('=' * 20) - ... print('transcript: ' + alternative.transcript) - ... print('confidence: ' + str(alternative.confidence)) - ==================== - transcript: Hello, this is a test - confidence: 0.81 - - -Streaming Recognition -~~~~~~~~~~~~~~~~~~~~~ - -The :meth:`~speech_v1.SpeechClient.streaming_recognize` method converts -speech data to possible text alternatives on the fly. - -.. note:: - Streaming recognition requests are limited to 1 minute of audio. - - See: https://cloud.google.com/speech/limits#content - -.. code-block:: python - - >>> import io - >>> from google.cloud import speech - >>> client = speech.SpeechClient() - >>> config = speech.types.RecognitionConfig( - ... encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, - ... language_code='en-US', - ... sample_rate_hertz=44100, - ... ) - >>> with io.open('./hello.wav', 'rb') as stream: - ... requests = [speech.types.StreamingRecognizeRequest( - ... audio_content=stream.read(), - ... )] - >>> results = sample.streaming_recognize( - ... config=speech.types.StreamingRecognitionConfig(config=config), - ... requests, - ... ) - >>> for result in results: - ... for alternative in result.alternatives: - ... print('=' * 20) - ... print('transcript: ' + alternative.transcript) - ... print('confidence: ' + str(alternative.confidence)) - ==================== - transcript: hello thank you for using Google Cloud platform - confidence: 0.927983105183 - - -By default the API will perform continuous recognition -(continuing to process audio even if the speaker in the audio pauses speaking) -until the client closes the output stream or until the maximum time limit has -been reached. - -If you only want to recognize a single utterance you can set -``single_utterance`` to :data:`True` and only one result will be returned. - -See: `Single Utterance`_ - -.. code-block:: python - - >>> import io - >>> from google.cloud import speech - >>> client = speech.SpeechClient() - >>> config = speech.types.RecognitionConfig( - ... encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, - ... language_code='en-US', - ... sample_rate_hertz=44100, - ... ) - >>> with io.open('./hello-pause-goodbye.wav', 'rb') as stream: - ... requests = [speech.types.StreamingRecognizeRequest( - ... 
audio_content=stream.read(), - ... )] - >>> results = sample.streaming_recognize( - ... config=speech.types.StreamingRecognitionConfig( - ... config=config, - ... single_utterance=False, - ... ), - ... requests, - ... ) - >>> for result in results: - ... for alternative in result.alternatives: - ... print('=' * 20) - ... print('transcript: ' + alternative.transcript) - ... print('confidence: ' + str(alternative.confidence)) - ... for result in results: - ... for alternative in result.alternatives: - ... print('=' * 20) - ... print('transcript: ' + alternative.transcript) - ... print('confidence: ' + str(alternative.confidence)) - ==================== - transcript: testing a pause - confidence: 0.933770477772 - -If ``interim_results`` is set to :data:`True`, interim results -(tentative hypotheses) may be returned as they become available. - -.. code-block:: python - - >>> import io - >>> from google.cloud import speech - >>> client = speech.SpeechClient() - >>> config = speech.types.RecognitionConfig( - ... encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, - ... language_code='en-US', - ... sample_rate_hertz=44100, - ... ) - >>> with io.open('./hello.wav', 'rb') as stream: - ... requests = [speech.types.StreamingRecognizeRequest( - ... audio_content=stream.read(), - ... )] - >>> config = speech.types.StreamingRecognitionConfig(config=config) - >>> responses = client.streaming_recognize(config,requests) - >>> for response in responses: - ... for result in response: - ... for alternative in result.alternatives: - ... print('=' * 20) - ... print('transcript: ' + alternative.transcript) - ... print('confidence: ' + str(alternative.confidence)) - ... print('is_final:' + str(result.is_final)) - ==================== - 'he' - None - False - ==================== - 'hell' - None - False - ==================== - 'hello' - 0.973458576 - True - - -.. _Single Utterance: https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1beta1#streamingrecognitionconfig -.. _sync_recognize: https://cloud.google.com/speech/reference/rest/v1beta1/speech/syncrecognize -.. _Speech Asynchronous Recognize: https://cloud.google.com/speech/reference/rest/v1beta1/speech/asyncrecognize - API Reference ------------- @@ -258,20 +9,27 @@ API Reference .. toctree:: :maxdepth: 2 - gapic/v1/api - gapic/v1/types - -A new beta release, spelled ``v1p1beta1``, is provided to provide for preview -of upcoming features. In order to use this, you will want to import from -``google.cloud.speech_v1p1beta1`` in lieu of ``google.cloud.speech``. + speech_v1/services + speech_v1/types An API and type reference is provided for the first beta as well: .. toctree:: :maxdepth: 2 - gapic/v1p1beta1/api - gapic/v1p1beta1/types + speech_v1p1beta1/services + speech_v1p1beta1/types + +Migration Guide +--------------- + +See the guide below for instructions on migrating to the 2.x release of this library. + +.. toctree:: + :maxdepth: 2 + + UPGRADING + Changelog --------- diff --git a/packages/google-cloud-python-speech/docs/speech_v1/services.rst b/packages/google-cloud-python-speech/docs/speech_v1/services.rst new file mode 100644 index 000000000000..2d1b3d29b163 --- /dev/null +++ b/packages/google-cloud-python-speech/docs/speech_v1/services.rst @@ -0,0 +1,6 @@ +Services for Google Cloud Speech v1 API +======================================= + +.. 
automodule:: google.cloud.speech_v1.services.speech + :members: + :inherited-members: diff --git a/packages/google-cloud-python-speech/docs/speech_v1/types.rst b/packages/google-cloud-python-speech/docs/speech_v1/types.rst new file mode 100644 index 000000000000..775370680a4f --- /dev/null +++ b/packages/google-cloud-python-speech/docs/speech_v1/types.rst @@ -0,0 +1,5 @@ +Types for Google Cloud Speech v1 API +==================================== + +.. automodule:: google.cloud.speech_v1.types + :members: diff --git a/packages/google-cloud-python-speech/docs/speech_v1p1beta1/services.rst b/packages/google-cloud-python-speech/docs/speech_v1p1beta1/services.rst new file mode 100644 index 000000000000..dbbb066cbb89 --- /dev/null +++ b/packages/google-cloud-python-speech/docs/speech_v1p1beta1/services.rst @@ -0,0 +1,6 @@ +Services for Google Cloud Speech v1p1beta1 API +============================================== + +.. automodule:: google.cloud.speech_v1p1beta1.services.speech + :members: + :inherited-members: diff --git a/packages/google-cloud-python-speech/docs/speech_v1p1beta1/types.rst b/packages/google-cloud-python-speech/docs/speech_v1p1beta1/types.rst new file mode 100644 index 000000000000..4c6b842199e5 --- /dev/null +++ b/packages/google-cloud-python-speech/docs/speech_v1p1beta1/types.rst @@ -0,0 +1,5 @@ +Types for Google Cloud Speech v1p1beta1 API +=========================================== + +.. automodule:: google.cloud.speech_v1p1beta1.types + :members: diff --git a/packages/google-cloud-python-speech/google/cloud/speech/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech/__init__.py new file mode 100644 index 000000000000..0f8052890376 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech/__init__.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.speech_v1.services.speech.async_client import SpeechAsyncClient +from google.cloud.speech_v1 import SpeechClient +from google.cloud.speech_v1.types.cloud_speech import LongRunningRecognizeMetadata +from google.cloud.speech_v1.types.cloud_speech import LongRunningRecognizeRequest +from google.cloud.speech_v1.types.cloud_speech import LongRunningRecognizeResponse +from google.cloud.speech_v1.types.cloud_speech import RecognitionAudio +from google.cloud.speech_v1.types.cloud_speech import RecognitionConfig +from google.cloud.speech_v1.types.cloud_speech import RecognitionMetadata +from google.cloud.speech_v1.types.cloud_speech import RecognizeRequest +from google.cloud.speech_v1.types.cloud_speech import RecognizeResponse +from google.cloud.speech_v1.types.cloud_speech import SpeakerDiarizationConfig +from google.cloud.speech_v1.types.cloud_speech import SpeechContext +from google.cloud.speech_v1.types.cloud_speech import SpeechRecognitionAlternative +from google.cloud.speech_v1.types.cloud_speech import SpeechRecognitionResult +from google.cloud.speech_v1.types.cloud_speech import StreamingRecognitionConfig +from google.cloud.speech_v1.types.cloud_speech import StreamingRecognitionResult +from google.cloud.speech_v1.types.cloud_speech import StreamingRecognizeRequest +from google.cloud.speech_v1.types.cloud_speech import StreamingRecognizeResponse +from google.cloud.speech_v1.types.cloud_speech import WordInfo + +__all__ = ( + "LongRunningRecognizeMetadata", + "LongRunningRecognizeRequest", + "LongRunningRecognizeResponse", + "RecognitionAudio", + "RecognitionConfig", + "RecognitionMetadata", + "RecognizeRequest", + "RecognizeResponse", + "SpeakerDiarizationConfig", + "SpeechAsyncClient", + "SpeechClient", + "SpeechContext", + "SpeechRecognitionAlternative", + "SpeechRecognitionResult", + "StreamingRecognitionConfig", + "StreamingRecognitionResult", + "StreamingRecognizeRequest", + "StreamingRecognizeResponse", + "WordInfo", +) diff --git a/packages/google-cloud-python-speech/google/cloud/speech/py.typed b/packages/google-cloud-python-speech/google/cloud/speech/py.typed new file mode 100644 index 000000000000..02081c09b9ab --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-speech package uses inline types. diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/__init__.py index 81314685022a..321f4c98258f 100644 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1/__init__.py +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2017 Google LLC +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,20 +13,52 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# -from __future__ import absolute_import +from .services.speech import SpeechClient +from .types.cloud_speech import LongRunningRecognizeMetadata +from .types.cloud_speech import LongRunningRecognizeRequest +from .types.cloud_speech import LongRunningRecognizeResponse +from .types.cloud_speech import RecognitionAudio +from .types.cloud_speech import RecognitionConfig +from .types.cloud_speech import RecognitionMetadata +from .types.cloud_speech import RecognizeRequest +from .types.cloud_speech import RecognizeResponse +from .types.cloud_speech import SpeakerDiarizationConfig +from .types.cloud_speech import SpeechContext +from .types.cloud_speech import SpeechRecognitionAlternative +from .types.cloud_speech import SpeechRecognitionResult +from .types.cloud_speech import StreamingRecognitionConfig +from .types.cloud_speech import StreamingRecognitionResult +from .types.cloud_speech import StreamingRecognizeRequest +from .types.cloud_speech import StreamingRecognizeResponse +from .types.cloud_speech import WordInfo -from google.cloud.speech_v1.gapic import speech_client -from google.cloud.speech_v1.gapic import enums from google.cloud.speech_v1.helpers import SpeechHelpers -from google.cloud.speech_v1 import types -class SpeechClient(SpeechHelpers, speech_client.SpeechClient): - __doc__ = speech_client.SpeechClient.__doc__ - enums = enums - types = types +class SpeechClient(SpeechHelpers, SpeechClient): + __doc__ = SpeechClient.__doc__ -__all__ = ("enums", "SpeechClient", "types") +__all__ = ( + "LongRunningRecognizeMetadata", + "LongRunningRecognizeRequest", + "LongRunningRecognizeResponse", + "RecognitionAudio", + "RecognitionConfig", + "RecognitionMetadata", + "RecognizeRequest", + "RecognizeResponse", + "SpeakerDiarizationConfig", + "SpeechContext", + "SpeechRecognitionAlternative", + "SpeechRecognitionResult", + "StreamingRecognitionConfig", + "StreamingRecognitionResult", + "StreamingRecognizeRequest", + "StreamingRecognizeResponse", + "WordInfo", + "SpeechClient", +) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/enums.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/enums.py deleted file mode 100644 index e880d4349c54..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/enums.py +++ /dev/null @@ -1,197 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class RecognitionConfig(object): - class AudioEncoding(enum.IntEnum): - """ - The encoding of the audio data sent in the request. - - All encodings support only 1 channel (mono) audio, unless the - ``audio_channel_count`` and ``enable_separate_recognition_per_channel`` - fields are set. 
- - For best results, the audio source should be captured and transmitted - using a lossless encoding (``FLAC`` or ``LINEAR16``). The accuracy of - the speech recognition can be reduced if lossy codecs are used to - capture or transmit audio, particularly if background noise is present. - Lossy codecs include ``MULAW``, ``AMR``, ``AMR_WB``, ``OGG_OPUS``, - ``SPEEX_WITH_HEADER_BYTE``, and ``MP3``. - - The ``FLAC`` and ``WAV`` audio file formats include a header that - describes the included audio content. You can request recognition for - ``WAV`` files that contain either ``LINEAR16`` or ``MULAW`` encoded - audio. If you send ``FLAC`` or ``WAV`` audio file format in your - request, you do not need to specify an ``AudioEncoding``; the audio - encoding format is determined from the file header. If you specify an - ``AudioEncoding`` when you send send ``FLAC`` or ``WAV`` audio, the - encoding configuration must match the encoding described in the audio - header; otherwise the request returns an - ``google.rpc.Code.INVALID_ARGUMENT`` error code. - - Attributes: - ENCODING_UNSPECIFIED (int): Not specified. - LINEAR16 (int): Uncompressed 16-bit signed little-endian samples (Linear PCM). - FLAC (int): ``FLAC`` (Free Lossless Audio Codec) is the recommended encoding - because it is lossless--therefore recognition is not compromised--and - requires only about half the bandwidth of ``LINEAR16``. ``FLAC`` stream - encoding supports 16-bit and 24-bit samples, however, not all fields in - ``STREAMINFO`` are supported. - MULAW (int): 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. - AMR (int): Adaptive Multi-Rate Narrowband codec. ``sample_rate_hertz`` must be - 8000. - AMR_WB (int): Adaptive Multi-Rate Wideband codec. ``sample_rate_hertz`` must be - 16000. - OGG_OPUS (int): Opus encoded audio frames in Ogg container - (`OggOpus `__). ``sample_rate_hertz`` - must be one of 8000, 12000, 16000, 24000, or 48000. - SPEEX_WITH_HEADER_BYTE (int): Although the use of lossy encodings is not recommended, if a very - low bitrate encoding is required, ``OGG_OPUS`` is highly preferred over - Speex encoding. The `Speex `__ encoding supported by - Cloud Speech API has a header byte in each block, as in MIME type - ``audio/x-speex-with-header-byte``. It is a variant of the RTP Speex - encoding defined in `RFC 5574 `__. - The stream is a sequence of blocks, one block per RTP packet. Each block - starts with a byte containing the length of the block, in bytes, - followed by one or more frames of Speex data, padded to an integral - number of bytes (octets) as specified in RFC 5574. In other words, each - RTP header is replaced with a single byte containing the block length. - Only Speex wideband is supported. ``sample_rate_hertz`` must be 16000. - """ - - ENCODING_UNSPECIFIED = 0 - LINEAR16 = 1 - FLAC = 2 - MULAW = 3 - AMR = 4 - AMR_WB = 5 - OGG_OPUS = 6 - SPEEX_WITH_HEADER_BYTE = 7 - - -class RecognitionMetadata(object): - class InteractionType(enum.IntEnum): - """ - Use case categories that the audio recognition request can be described - by. - - Attributes: - INTERACTION_TYPE_UNSPECIFIED (int): Use case is either unknown or is something other than one of the other - values below. - DISCUSSION (int): Multiple people in a conversation or discussion. For example in a - meeting with two or more people actively participating. 
Typically all - the primary people speaking would be in the same room (if not, see - PHONE_CALL) - PRESENTATION (int): One or more persons lecturing or presenting to others, mostly - uninterrupted. - PHONE_CALL (int): A phone-call or video-conference in which two or more people, who are - not in the same room, are actively participating. - VOICEMAIL (int): A recorded message intended for another person to listen to. - PROFESSIONALLY_PRODUCED (int): Professionally produced audio (eg. TV Show, Podcast). - VOICE_SEARCH (int): Transcribe spoken questions and queries into text. - VOICE_COMMAND (int): Transcribe voice commands, such as for controlling a device. - DICTATION (int): Transcribe speech to text to create a written document, such as a - text-message, email or report. - """ - - INTERACTION_TYPE_UNSPECIFIED = 0 - DISCUSSION = 1 - PRESENTATION = 2 - PHONE_CALL = 3 - VOICEMAIL = 4 - PROFESSIONALLY_PRODUCED = 5 - VOICE_SEARCH = 6 - VOICE_COMMAND = 7 - DICTATION = 8 - - class MicrophoneDistance(enum.IntEnum): - """ - Enumerates the types of capture settings describing an audio file. - - Attributes: - MICROPHONE_DISTANCE_UNSPECIFIED (int): Audio type is not known. - NEARFIELD (int): The audio was captured from a closely placed microphone. Eg. phone, - dictaphone, or handheld microphone. Generally if there speaker is within - 1 meter of the microphone. - MIDFIELD (int): The speaker if within 3 meters of the microphone. - FARFIELD (int): The speaker is more than 3 meters away from the microphone. - """ - - MICROPHONE_DISTANCE_UNSPECIFIED = 0 - NEARFIELD = 1 - MIDFIELD = 2 - FARFIELD = 3 - - class OriginalMediaType(enum.IntEnum): - """ - The original media the speech was recorded on. - - Attributes: - ORIGINAL_MEDIA_TYPE_UNSPECIFIED (int): Unknown original media type. - AUDIO (int): The speech data is an audio recording. - VIDEO (int): The speech data originally recorded on a video. - """ - - ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0 - AUDIO = 1 - VIDEO = 2 - - class RecordingDeviceType(enum.IntEnum): - """ - The type of device the speech was recorded with. - - Attributes: - RECORDING_DEVICE_TYPE_UNSPECIFIED (int): The recording device is unknown. - SMARTPHONE (int): Speech was recorded on a smartphone. - PC (int): Speech was recorded using a personal computer or tablet. - PHONE_LINE (int): Speech was recorded over a phone line. - VEHICLE (int): Speech was recorded in a vehicle. - OTHER_OUTDOOR_DEVICE (int): Speech was recorded outdoors. - OTHER_INDOOR_DEVICE (int): Speech was recorded indoors. - """ - - RECORDING_DEVICE_TYPE_UNSPECIFIED = 0 - SMARTPHONE = 1 - PC = 2 - PHONE_LINE = 3 - VEHICLE = 4 - OTHER_OUTDOOR_DEVICE = 5 - OTHER_INDOOR_DEVICE = 6 - - -class StreamingRecognizeResponse(object): - class SpeechEventType(enum.IntEnum): - """ - Indicates the type of speech event. - - Attributes: - SPEECH_EVENT_UNSPECIFIED (int): No speech event specified. - END_OF_SINGLE_UTTERANCE (int): This event indicates that the server has detected the end of the - user's speech utterance and expects no additional speech. Therefore, the - server will not process additional audio (although it may subsequently - return additional results). The client should stop sending additional - audio data, half-close the gRPC connection, and wait for any additional - results until the server closes the gRPC connection. This event is only - sent if ``single_utterance`` was set to ``true``, and is not used - otherwise. 
- """ - - SPEECH_EVENT_UNSPECIFIED = 0 - END_OF_SINGLE_UTTERANCE = 1 diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/speech_client.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/speech_client.py deleted file mode 100644 index ab4b2145488c..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/speech_client.py +++ /dev/null @@ -1,410 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.cloud.speech.v1 Speech API.""" - -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.speech_v1.gapic import enums -from google.cloud.speech_v1.gapic import speech_client_config -from google.cloud.speech_v1.gapic.transports import speech_grpc_transport -from google.cloud.speech_v1.proto import cloud_speech_pb2 -from google.cloud.speech_v1.proto import cloud_speech_pb2_grpc -from google.longrunning import operations_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-speech",).version - - -class SpeechClient(object): - """Service that implements Google Cloud Speech API.""" - - SERVICE_ADDRESS = "speech.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.speech.v1.Speech" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpeechClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.SpeechGrpcTransport, - Callable[[~.Credentials, type], ~.SpeechGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. 
Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = speech_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=speech_grpc_transport.SpeechGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = speech_grpc_transport.SpeechGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. 
- self._inner_api_calls = {} - - # Service calls - def recognize( - self, - config, - audio, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Performs synchronous speech recognition: receive results after all audio - has been sent and processed. - - Example: - >>> from google.cloud import speech_v1 - >>> from google.cloud.speech_v1 import enums - >>> - >>> client = speech_v1.SpeechClient() - >>> - >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC - >>> sample_rate_hertz = 44100 - >>> language_code = 'en-US' - >>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code} - >>> uri = 'gs://bucket_name/file_name.flac' - >>> audio = {'uri': uri} - >>> - >>> response = client.recognize(config, audio) - - Args: - config (Union[dict, ~google.cloud.speech_v1.types.RecognitionConfig]): Required. Provides information to the recognizer that specifies how to - process the request. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.speech_v1.types.RecognitionConfig` - audio (Union[dict, ~google.cloud.speech_v1.types.RecognitionAudio]): Required. The audio data to be recognized. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.speech_v1.types.RecognitionAudio` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.speech_v1.types.RecognizeResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "recognize" not in self._inner_api_calls: - self._inner_api_calls[ - "recognize" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.recognize, - default_retry=self._method_configs["Recognize"].retry, - default_timeout=self._method_configs["Recognize"].timeout, - client_info=self._client_info, - ) - - request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio,) - return self._inner_api_calls["recognize"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def long_running_recognize( - self, - config, - audio, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Performs asynchronous speech recognition: receive results via the - google.longrunning.Operations interface. Returns either an - ``Operation.error`` or an ``Operation.response`` which contains a - ``LongRunningRecognizeResponse`` message. For more information on - asynchronous speech recognition, see the - `how-to `__. 
- - Example: - >>> from google.cloud import speech_v1 - >>> from google.cloud.speech_v1 import enums - >>> - >>> client = speech_v1.SpeechClient() - >>> - >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC - >>> sample_rate_hertz = 44100 - >>> language_code = 'en-US' - >>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code} - >>> uri = 'gs://bucket_name/file_name.flac' - >>> audio = {'uri': uri} - >>> - >>> response = client.long_running_recognize(config, audio) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - config (Union[dict, ~google.cloud.speech_v1.types.RecognitionConfig]): Required. Provides information to the recognizer that specifies how to - process the request. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.speech_v1.types.RecognitionConfig` - audio (Union[dict, ~google.cloud.speech_v1.types.RecognitionAudio]): Required. The audio data to be recognized. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.speech_v1.types.RecognitionAudio` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.speech_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "long_running_recognize" not in self._inner_api_calls: - self._inner_api_calls[ - "long_running_recognize" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.long_running_recognize, - default_retry=self._method_configs["LongRunningRecognize"].retry, - default_timeout=self._method_configs["LongRunningRecognize"].timeout, - client_info=self._client_info, - ) - - request = cloud_speech_pb2.LongRunningRecognizeRequest( - config=config, audio=audio, - ) - operation = self._inner_api_calls["long_running_recognize"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - cloud_speech_pb2.LongRunningRecognizeResponse, - metadata_type=cloud_speech_pb2.LongRunningRecognizeMetadata, - ) - - def streaming_recognize( - self, - requests, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Performs bidirectional streaming speech recognition: receive results while - sending audio. This method is only available via the gRPC API (not REST). 
- - Example: - >>> from google.cloud import speech_v1 - >>> - >>> client = speech_v1.SpeechClient() - >>> - >>> request = {} - >>> - >>> requests = [request] - >>> for element in client.streaming_recognize(requests): - ... # process element - ... pass - - Args: - requests (iterator[dict|google.cloud.speech_v1.proto.cloud_speech_pb2.StreamingRecognizeRequest]): The input objects. If a dict is provided, it must be of the - same form as the protobuf message :class:`~google.cloud.speech_v1.types.StreamingRecognizeRequest` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.speech_v1.types.StreamingRecognizeResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "streaming_recognize" not in self._inner_api_calls: - self._inner_api_calls[ - "streaming_recognize" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.streaming_recognize, - default_retry=self._method_configs["StreamingRecognize"].retry, - default_timeout=self._method_configs["StreamingRecognize"].timeout, - client_info=self._client_info, - ) - - return self._inner_api_calls["streaming_recognize"]( - requests, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/speech_client_config.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/speech_client_config.py deleted file mode 100644 index fc9080f4770e..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/speech_client_config.py +++ /dev/null @@ -1,57 +0,0 @@ -config = { - "interfaces": { - "google.cloud.speech.v1.Speech": { - "retry_codes": { - "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"], - "no_retry_codes": [], - "no_retry_1_codes": [], - }, - "retry_params": { - "retry_policy_1_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 5000000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 5000000, - "total_timeout_millis": 5000000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_1_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 5000000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 5000000, - "total_timeout_millis": 5000000, - }, - }, - "methods": { - "Recognize": { - "timeout_millis": 5000000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "LongRunningRecognize": { - "timeout_millis": 5000000, - "retry_codes_name": 
"no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "StreamingRecognize": { - "timeout_millis": 5000000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - }, - } - } -} diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/transports/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/transports/speech_grpc_transport.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/transports/speech_grpc_transport.py deleted file mode 100644 index 6ae4fafdc073..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1/gapic/transports/speech_grpc_transport.py +++ /dev/null @@ -1,162 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.speech_v1.proto import cloud_speech_pb2_grpc - - -class SpeechGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.speech.v1 Speech API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="speech.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. 
- self._stubs = { - "speech_stub": cloud_speech_pb2_grpc.SpeechStub(channel), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="speech.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def recognize(self): - """Return the gRPC stub for :meth:`SpeechClient.recognize`. - - Performs synchronous speech recognition: receive results after all audio - has been sent and processed. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["speech_stub"].Recognize - - @property - def long_running_recognize(self): - """Return the gRPC stub for :meth:`SpeechClient.long_running_recognize`. - - Performs asynchronous speech recognition: receive results via the - google.longrunning.Operations interface. Returns either an - ``Operation.error`` or an ``Operation.response`` which contains a - ``LongRunningRecognizeResponse`` message. For more information on - asynchronous speech recognition, see the - `how-to <https://cloud.google.com/speech-to-text/docs/async-recognize>`__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["speech_stub"].LongRunningRecognize - - @property - def streaming_recognize(self): - """Return the gRPC stub for :meth:`SpeechClient.streaming_recognize`. - - Performs bidirectional streaming speech recognition: receive results while - sending audio. This method is only available via the gRPC API (not REST). - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["speech_stub"].StreamingRecognize diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/helpers.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/helpers.py index 26f17dbe52d3..3342f36b6261 100644 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1/helpers.py +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/helpers.py @@ -29,8 +29,10 @@ def streaming_recognize( self, config, requests, + *, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=(), ): """Perform bi-directional speech recognition. @@ -43,16 +45,14 @@ def streaming_recognize( future.
Example: - >>> from google.cloud.speech_v1 import enums - >>> from google.cloud.speech_v1 import SpeechClient - >>> from google.cloud.speech_v1 import types - >>> client = SpeechClient() - >>> config = types.StreamingRecognitionConfig( - ... config=types.RecognitionConfig( - ... encoding=enums.RecognitionConfig.AudioEncoding.FLAC, + >>> from google.cloud import speech_v1 + >>> client = speech_v1.SpeechClient() + >>> config = speech_v1.StreamingRecognitionConfig( + ... config=speech_v1.RecognitionConfig( + ... encoding=speech_v1.RecognitionConfig.AudioEncoding.FLAC, ... ), ... ) - >>> request = types.StreamingRecognizeRequest(audio_content=b'...') + >>> request = speech_v1.StreamingRecognizeRequest(audio_content=b'...') >>> requests = [request] >>> for element in client.streaming_recognize(config, requests): ... # process element @@ -69,16 +69,17 @@ def streaming_recognize( timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. Returns: Iterable[:class:`~.types.StreamingRecognizeResponse`] Raises: - :exc:`google.gax.errors.GaxError` if the RPC is aborted. :exc:`ValueError` if the parameters are invalid. """ return super(SpeechHelpers, self).streaming_recognize( - self._streaming_request_iterable(config, requests), + requests=self._streaming_request_iterable(config, requests), retry=retry, timeout=timeout, ) @@ -97,6 +98,8 @@ def _streaming_request_iterable(self, config, requests): correctly formatted input for :meth:`~.speech_v1.SpeechClient.streaming_recognize`. """ - yield self.types.StreamingRecognizeRequest(streaming_config=config) + # yield a dictionary rather than the request object since the helper + # is used by both the v1 and v1p1beta1 clients + yield {"streaming_config": config} for request in requests: yield request diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/proto/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/proto/cloud_speech_pb2.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/proto/cloud_speech_pb2.py deleted file mode 100644 index 56808d60dddd..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1/proto/cloud_speech_pb2.py +++ /dev/null @@ -1,2805 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT!
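The helper change above works because the proto-plus request classes generated for both speech_v1 and speech_v1p1beta1 coerce plain dicts (including nested ones) into messages, so a version-neutral helper can yield ``{"streaming_config": config}`` and let each client build its own request type. An illustrative sketch of that coercion:

    >>> from google.cloud import speech_v1
    >>> request = speech_v1.StreamingRecognizeRequest(
    ...     {'streaming_config': {'config': {'language_code': 'en-US'}}}
    ... )
    >>> request.streaming_config.config.language_code
    'en-US'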
-# source: google/cloud/speech_v1/proto/cloud_speech.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/speech_v1/proto/cloud_speech.proto", - package="google.cloud.speech.v1", - syntax="proto3", - serialized_options=b"\n\032com.google.cloud.speech.v1B\013SpeechProtoP\001Z\n\x06\x63onfig\x18\x01 \x01(\x0b\x32).google.cloud.speech.v1.RecognitionConfigB\x03\xe0\x41\x02\x12<\n\x05\x61udio\x18\x02 \x01(\x0b\x32(.google.cloud.speech.v1.RecognitionAudioB\x03\xe0\x41\x02"\x9b\x01\n\x1bLongRunningRecognizeRequest\x12>\n\x06\x63onfig\x18\x01 \x01(\x0b\x32).google.cloud.speech.v1.RecognitionConfigB\x03\xe0\x41\x02\x12<\n\x05\x61udio\x18\x02 \x01(\x0b\x32(.google.cloud.speech.v1.RecognitionAudioB\x03\xe0\x41\x02"\x99\x01\n\x19StreamingRecognizeRequest\x12N\n\x10streaming_config\x18\x01 \x01(\x0b\x32\x32.google.cloud.speech.v1.StreamingRecognitionConfigH\x00\x12\x17\n\raudio_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request"\x8f\x01\n\x1aStreamingRecognitionConfig\x12>\n\x06\x63onfig\x18\x01 \x01(\x0b\x32).google.cloud.speech.v1.RecognitionConfigB\x03\xe0\x41\x02\x12\x18\n\x10single_utterance\x18\x02 \x01(\x08\x12\x17\n\x0finterim_results\x18\x03 \x01(\x08"\xdf\x05\n\x11RecognitionConfig\x12I\n\x08\x65ncoding\x18\x01 \x01(\x0e\x32\x37.google.cloud.speech.v1.RecognitionConfig.AudioEncoding\x12\x19\n\x11sample_rate_hertz\x18\x02 \x01(\x05\x12\x1b\n\x13\x61udio_channel_count\x18\x07 \x01(\x05\x12/\n\'enable_separate_recognition_per_channel\x18\x0c \x01(\x08\x12\x1a\n\rlanguage_code\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x18\n\x10max_alternatives\x18\x04 \x01(\x05\x12\x18\n\x10profanity_filter\x18\x05 \x01(\x08\x12>\n\x0fspeech_contexts\x18\x06 \x03(\x0b\x32%.google.cloud.speech.v1.SpeechContext\x12 \n\x18\x65nable_word_time_offsets\x18\x08 \x01(\x08\x12$\n\x1c\x65nable_automatic_punctuation\x18\x0b \x01(\x08\x12L\n\x12\x64iarization_config\x18\x13 \x01(\x0b\x32\x30.google.cloud.speech.v1.SpeakerDiarizationConfig\x12=\n\x08metadata\x18\t \x01(\x0b\x32+.google.cloud.speech.v1.RecognitionMetadata\x12\r\n\x05model\x18\r \x01(\t\x12\x14\n\x0cuse_enhanced\x18\x0e \x01(\x08"\x8b\x01\n\rAudioEncoding\x12\x18\n\x14\x45NCODING_UNSPECIFIED\x10\x00\x12\x0c\n\x08LINEAR16\x10\x01\x12\x08\n\x04\x46LAC\x10\x02\x12\t\n\x05MULAW\x10\x03\x12\x07\n\x03\x41MR\x10\x04\x12\n\n\x06\x41MR_WB\x10\x05\x12\x0c\n\x08OGG_OPUS\x10\x06\x12\x1a\n\x16SPEEX_WITH_HEADER_BYTE\x10\x07"\x90\x01\n\x18SpeakerDiarizationConfig\x12"\n\x1a\x65nable_speaker_diarization\x18\x01 \x01(\x08\x12\x19\n\x11min_speaker_count\x18\x02 
\x01(\x05\x12\x19\n\x11max_speaker_count\x18\x03 \x01(\x05\x12\x1a\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x05\x18\x01\xe0\x41\x03"\xa0\x08\n\x13RecognitionMetadata\x12U\n\x10interaction_type\x18\x01 \x01(\x0e\x32;.google.cloud.speech.v1.RecognitionMetadata.InteractionType\x12$\n\x1cindustry_naics_code_of_audio\x18\x03 \x01(\r\x12[\n\x13microphone_distance\x18\x04 \x01(\x0e\x32>.google.cloud.speech.v1.RecognitionMetadata.MicrophoneDistance\x12Z\n\x13original_media_type\x18\x05 \x01(\x0e\x32=.google.cloud.speech.v1.RecognitionMetadata.OriginalMediaType\x12^\n\x15recording_device_type\x18\x06 \x01(\x0e\x32?.google.cloud.speech.v1.RecognitionMetadata.RecordingDeviceType\x12\x1d\n\x15recording_device_name\x18\x07 \x01(\t\x12\x1a\n\x12original_mime_type\x18\x08 \x01(\t\x12\x13\n\x0b\x61udio_topic\x18\n \x01(\t"\xc5\x01\n\x0fInteractionType\x12 \n\x1cINTERACTION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nDISCUSSION\x10\x01\x12\x10\n\x0cPRESENTATION\x10\x02\x12\x0e\n\nPHONE_CALL\x10\x03\x12\r\n\tVOICEMAIL\x10\x04\x12\x1b\n\x17PROFESSIONALLY_PRODUCED\x10\x05\x12\x10\n\x0cVOICE_SEARCH\x10\x06\x12\x11\n\rVOICE_COMMAND\x10\x07\x12\r\n\tDICTATION\x10\x08"d\n\x12MicrophoneDistance\x12#\n\x1fMICROPHONE_DISTANCE_UNSPECIFIED\x10\x00\x12\r\n\tNEARFIELD\x10\x01\x12\x0c\n\x08MIDFIELD\x10\x02\x12\x0c\n\x08\x46\x41RFIELD\x10\x03"N\n\x11OriginalMediaType\x12#\n\x1fORIGINAL_MEDIA_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05\x41UDIO\x10\x01\x12\t\n\x05VIDEO\x10\x02"\xa4\x01\n\x13RecordingDeviceType\x12%\n!RECORDING_DEVICE_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nSMARTPHONE\x10\x01\x12\x06\n\x02PC\x10\x02\x12\x0e\n\nPHONE_LINE\x10\x03\x12\x0b\n\x07VEHICLE\x10\x04\x12\x18\n\x14OTHER_OUTDOOR_DEVICE\x10\x05\x12\x17\n\x13OTHER_INDOOR_DEVICE\x10\x06" \n\rSpeechContext\x12\x0f\n\x07phrases\x18\x01 \x03(\t"D\n\x10RecognitionAudio\x12\x11\n\x07\x63ontent\x18\x01 \x01(\x0cH\x00\x12\r\n\x03uri\x18\x02 \x01(\tH\x00\x42\x0e\n\x0c\x61udio_source"U\n\x11RecognizeResponse\x12@\n\x07results\x18\x02 \x03(\x0b\x32/.google.cloud.speech.v1.SpeechRecognitionResult"`\n\x1cLongRunningRecognizeResponse\x12@\n\x07results\x18\x02 \x03(\x0b\x32/.google.cloud.speech.v1.SpeechRecognitionResult"\x9e\x01\n\x1cLongRunningRecognizeMetadata\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10last_update_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb1\x02\n\x1aStreamingRecognizeResponse\x12!\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x43\n\x07results\x18\x02 \x03(\x0b\x32\x32.google.cloud.speech.v1.StreamingRecognitionResult\x12]\n\x11speech_event_type\x18\x04 \x01(\x0e\x32\x42.google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType"L\n\x0fSpeechEventType\x12\x1c\n\x18SPEECH_EVENT_UNSPECIFIED\x10\x00\x12\x1b\n\x17\x45ND_OF_SINGLE_UTTERANCE\x10\x01"\xf2\x01\n\x1aStreamingRecognitionResult\x12J\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x34.google.cloud.speech.v1.SpeechRecognitionAlternative\x12\x10\n\x08is_final\x18\x02 \x01(\x08\x12\x11\n\tstability\x18\x03 \x01(\x02\x12\x32\n\x0fresult_end_time\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x63hannel_tag\x18\x05 \x01(\x05\x12\x1a\n\rlanguage_code\x18\x06 \x01(\tB\x03\xe0\x41\x03"z\n\x17SpeechRecognitionResult\x12J\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x34.google.cloud.speech.v1.SpeechRecognitionAlternative\x12\x13\n\x0b\x63hannel_tag\x18\x02 \x01(\x05"w\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 
\x01(\x02\x12/\n\x05words\x18\x03 \x03(\x0b\x32 .google.cloud.speech.v1.WordInfo"\x8e\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03\x32\xd1\x04\n\x06Speech\x12\x90\x01\n\tRecognize\x12(.google.cloud.speech.v1.RecognizeRequest\x1a).google.cloud.speech.v1.RecognizeResponse".\x82\xd3\xe4\x93\x02\x19"\x14/v1/speech:recognize:\x01*\xda\x41\x0c\x63onfig,audio\x12\xe4\x01\n\x14LongRunningRecognize\x12\x33.google.cloud.speech.v1.LongRunningRecognizeRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02$"\x1f/v1/speech:longrunningrecognize:\x01*\xda\x41\x0c\x63onfig,audio\xca\x41<\n\x1cLongRunningRecognizeResponse\x12\x1cLongRunningRecognizeMetadata\x12\x81\x01\n\x12StreamingRecognize\x12\x31.google.cloud.speech.v1.StreamingRecognizeRequest\x1a\x32.google.cloud.speech.v1.StreamingRecognizeResponse"\x00(\x01\x30\x01\x1aI\xca\x41\x15speech.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBr\n\x1a\x63om.google.cloud.speech.v1B\x0bSpeechProtoP\x01Z`__. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.StreamingRecognizeRequest) - }, -) -_sym_db.RegisterMessage(StreamingRecognizeRequest) - -StreamingRecognitionConfig = _reflection.GeneratedProtocolMessageType( - "StreamingRecognitionConfig", - (_message.Message,), - { - "DESCRIPTOR": _STREAMINGRECOGNITIONCONFIG, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """Provides information to the recognizer that specifies how to process - the request. - - Attributes: - config: - Required. Provides information to the recognizer that - specifies how to process the request. - single_utterance: - If ``false`` or omitted, the recognizer will perform - continuous recognition (continuing to wait for and process - audio even if the user pauses speaking) until the client - closes the input stream (gRPC API) or until the maximum time - limit has been reached. May return multiple - ``StreamingRecognitionResult``\ s with the ``is_final`` flag - set to ``true``. If ``true``, the recognizer will detect a - single spoken utterance. When it detects that the user has - paused or stopped speaking, it will return an - ``END_OF_SINGLE_UTTERANCE`` event and cease recognition. It - will return no more than one ``StreamingRecognitionResult`` - with the ``is_final`` flag set to ``true``. - interim_results: - If ``true``, interim results (tentative hypotheses) may be - returned as they become available (these interim results are - indicated with the ``is_final=false`` flag). If ``false`` or - omitted, only ``is_final=true`` result(s) are returned. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.StreamingRecognitionConfig) - }, -) -_sym_db.RegisterMessage(StreamingRecognitionConfig) - -RecognitionConfig = _reflection.GeneratedProtocolMessageType( - "RecognitionConfig", - (_message.Message,), - { - "DESCRIPTOR": _RECOGNITIONCONFIG, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """Provides information to the recognizer that specifies how to process - the request. - - Attributes: - encoding: - Encoding of audio data sent in all ``RecognitionAudio`` - messages. This field is optional for ``FLAC`` and ``WAV`` - audio files and required for all other audio formats. 
For - details, see [AudioEncoding][google.cloud.speech.v1.Recognitio - nConfig.AudioEncoding]. - sample_rate_hertz: - Sample rate in Hertz of the audio data sent in all - ``RecognitionAudio`` messages. Valid values are: 8000-48000. - 16000 is optimal. For best results, set the sampling rate of - the audio source to 16000 Hz. If that’s not possible, use the - native sample rate of the audio source (instead of re- - sampling). This field is optional for FLAC and WAV audio - files, but is required for all other audio formats. For - details, see [AudioEncoding][google.cloud.speech.v1.Recognitio - nConfig.AudioEncoding]. - audio_channel_count: - The number of channels in the input audio data. ONLY set this - for MULTI-CHANNEL recognition. Valid values for LINEAR16 and - FLAC are ``1``-``8``. Valid values for OGG_OPUS are ‘1’-‘254’. - Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE - is only ``1``. If ``0`` or omitted, defaults to one channel - (mono). Note: We only recognize the first channel by default. - To perform independent recognition on each channel set - ``enable_separate_recognition_per_channel`` to ‘true’. - enable_separate_recognition_per_channel: - This needs to be set to ``true`` explicitly and - ``audio_channel_count`` > 1 to get each channel recognized - separately. The recognition result will contain a - ``channel_tag`` field to state which channel that result - belongs to. If this is not true, we will only recognize the - first channel. The request is billed cumulatively for all - channels recognized: ``audio_channel_count`` multiplied by the - length of the audio. - language_code: - Required. The language of the supplied audio as a `BCP-47 - `__ language - tag. Example: “en-US”. See `Language Support - `__ - for a list of the currently supported language codes. - max_alternatives: - Maximum number of recognition hypotheses to be returned. - Specifically, the maximum number of - ``SpeechRecognitionAlternative`` messages within each - ``SpeechRecognitionResult``. The server may return fewer than - ``max_alternatives``. Valid values are ``0``-``30``. A value - of ``0`` or ``1`` will return a maximum of one. If omitted, - will return a maximum of one. - profanity_filter: - If set to ``true``, the server will attempt to filter out - profanities, replacing all but the initial character in each - filtered word with asterisks, e.g. "f***". If set to ``false`` - or omitted, profanities won’t be filtered out. - speech_contexts: - Array of - [SpeechContext][google.cloud.speech.v1.SpeechContext]. A means - to provide context to assist the speech recognition. For more - information, see `speech adaptation - `__. - enable_word_time_offsets: - If ``true``, the top result includes a list of words and the - start and end time offsets (timestamps) for those words. If - ``false``, no word-level time offset information is returned. - The default is ``false``. - enable_automatic_punctuation: - If ‘true’, adds punctuation to recognition result hypotheses. - This feature is only available in select languages. Setting - this for requests in other languages has no effect at all. The - default ‘false’ value does not add punctuation to result - hypotheses. Note: This is currently offered as an experimental - service, complimentary to all users. In the future this may be - exclusively available as a premium feature. - diarization_config: - Config to enable speaker diarization and set additional - parameters to make diarization better suited for your - application. 
Note: When this is enabled, we send all the words - from the beginning of the audio for the top alternative in - every consecutive STREAMING response. This is done in order - to improve our speaker tags as our models learn to identify - the speakers in the conversation over time. For non-streaming - requests, the diarization results will be provided only in the - top alternative of the FINAL SpeechRecognitionResult. - metadata: - Metadata regarding this request. - model: - Which model to select for the given request. Select the model - best suited to your domain to get best results. If a model is - not explicitly specified, then we auto-select a model based on - the parameters in the RecognitionConfig. The available models
are: ``command_and_search`` (best for short queries such as - voice commands or voice search), ``phone_call`` (best for audio - that originated from a phone call, typically recorded at an
8khz sampling rate), ``video`` (best for audio that originated - from video or includes multiple speakers; ideally the audio is - recorded at a 16khz or greater sampling rate; this is a premium - model that costs more than the standard rate), and ``default``
(best for audio that is not one of the specific audio models, - for example long-form audio; ideally the audio is high-fidelity, - recorded at a 16khz or greater sampling rate).
- use_enhanced: - Set to true to use an enhanced model for speech recognition. - If ``use_enhanced`` is set to true and the ``model`` field is - not set, then an appropriate enhanced model is chosen if an - enhanced model exists for the audio. If ``use_enhanced`` is - true and an enhanced version of the specified model does not - exist, then the speech is recognized using the standard - version of the specified model. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.RecognitionConfig) - }, -) -_sym_db.RegisterMessage(RecognitionConfig) - -SpeakerDiarizationConfig = _reflection.GeneratedProtocolMessageType( - "SpeakerDiarizationConfig", - (_message.Message,), - { - "DESCRIPTOR": _SPEAKERDIARIZATIONCONFIG, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """Config to enable speaker diarization. - - Attributes: - enable_speaker_diarization: - If ‘true’, enables speaker detection for each recognized word - in the top alternative of the recognition result using a - speaker_tag provided in the WordInfo. - min_speaker_count: - Minimum number of speakers in the conversation. This range - gives you more flexibility by allowing the system to - automatically determine the correct number of speakers. If not - set, the default value is 2. - max_speaker_count: - Maximum number of speakers in the conversation. This range - gives you more flexibility by allowing the system to - automatically determine the correct number of speakers. If not - set, the default value is 6. - speaker_tag: - Unused. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.SpeakerDiarizationConfig) - }, -) -_sym_db.RegisterMessage(SpeakerDiarizationConfig) - -RecognitionMetadata = _reflection.GeneratedProtocolMessageType( - "RecognitionMetadata", - (_message.Message,), - { - "DESCRIPTOR": _RECOGNITIONMETADATA, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """Description of audio data to be recognized. - - Attributes: - interaction_type: - The use case most closely describing the audio content to be - recognized. - industry_naics_code_of_audio: - The industry vertical to which this speech recognition request - most closely applies. This is most indicative of the topics - contained in the audio. Use the 6-digit NAICS code to identify - the industry vertical - see https://www.naics.com/search/. - microphone_distance: - The audio type that most closely describes the audio being - recognized. - original_media_type: - The original media the speech was recorded on. - recording_device_type: - The type of device the speech was recorded with. - recording_device_name: - The device used to make the recording. Examples ‘Nexus 5X’ or - ‘Polycom SoundStation IP 6000’ or ‘POTS’ or ‘VoIP’ or - ‘Cardioid Microphone’. - original_mime_type: - Mime type of the original audio file. For example - ``audio/m4a``, ``audio/x-alaw-basic``, ``audio/mp3``, - ``audio/3gpp``. A list of possible audio mime types is - maintained at http://www.iana.org/assignments/media- - types/media-types.xhtml#audio - audio_topic: - Description of the content. Eg. “Recordings of federal supreme - court hearings from 2012”. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.RecognitionMetadata) - }, -) -_sym_db.RegisterMessage(RecognitionMetadata) - -SpeechContext = _reflection.GeneratedProtocolMessageType( - "SpeechContext", - (_message.Message,), - { - "DESCRIPTOR": _SPEECHCONTEXT, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """Provides “hints” to the speech recognizer to favor specific words and - phrases in the results. - - Attributes: - phrases: - A list of strings containing words and phrases “hints” so that - the speech recognition is more likely to recognize them. This - can be used to improve the accuracy for specific words and - phrases, for example, if specific commands are typically - spoken by the user. This can also be used to add additional - words to the vocabulary of the recognizer. See `usage limits - `__. - List items can also be set to classes for groups of words that - represent common concepts that occur in natural language. For - example, rather than providing phrase hints for every month of - the year, using the $MONTH class improves the likelihood of - correctly transcribing audio that includes months. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.SpeechContext) - }, -) -_sym_db.RegisterMessage(SpeechContext) - -RecognitionAudio = _reflection.GeneratedProtocolMessageType( - "RecognitionAudio", - (_message.Message,), - { - "DESCRIPTOR": _RECOGNITIONAUDIO, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """Contains audio data in the encoding specified in the - ``RecognitionConfig``. Either ``content`` or ``uri`` must be supplied. - Supplying both or neither returns - [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. - See `content limits `__. - - Attributes: - audio_source: - The audio source, which is either inline content or a Google - Cloud Storage uri. - content: - The audio data bytes encoded as specified in - ``RecognitionConfig``. Note: as with all bytes fields, proto - buffers use a pure binary representation, whereas JSON - representations use base64. - uri: - URI that points to a file that contains audio data bytes as - specified in ``RecognitionConfig``. The file must not be - compressed (for example, gzip). Currently, only Google Cloud - Storage URIs are supported, which must be specified in the - following format: ``gs://bucket_name/object_name`` (other URI - formats return [google.rpc.Code.INVALID_ARGUMENT][google.rpc.C - ode.INVALID_ARGUMENT]). For more information, see `Request - URIs `__. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.RecognitionAudio) - }, -) -_sym_db.RegisterMessage(RecognitionAudio) - -RecognizeResponse = _reflection.GeneratedProtocolMessageType( - "RecognizeResponse", - (_message.Message,), - { - "DESCRIPTOR": _RECOGNIZERESPONSE, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """The only message returned to the client by the ``Recognize`` method. - It contains the result as zero or more sequential - ``SpeechRecognitionResult`` messages. - - Attributes: - results: - Sequential list of transcription results corresponding to - sequential portions of audio. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.RecognizeResponse) - }, -) -_sym_db.RegisterMessage(RecognizeResponse) - -LongRunningRecognizeResponse = _reflection.GeneratedProtocolMessageType( - "LongRunningRecognizeResponse", - (_message.Message,), - { - "DESCRIPTOR": _LONGRUNNINGRECOGNIZERESPONSE, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """The only message returned to the client by the - ``LongRunningRecognize`` method. It contains the result as zero or - more sequential ``SpeechRecognitionResult`` messages. It is included - in the ``result.response`` field of the ``Operation`` returned by the - ``GetOperation`` call of the ``google::longrunning::Operations`` - service. - - Attributes: - results: - Sequential list of transcription results corresponding to - sequential portions of audio. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.LongRunningRecognizeResponse) - }, -) -_sym_db.RegisterMessage(LongRunningRecognizeResponse) - -LongRunningRecognizeMetadata = _reflection.GeneratedProtocolMessageType( - "LongRunningRecognizeMetadata", - (_message.Message,), - { - "DESCRIPTOR": _LONGRUNNINGRECOGNIZEMETADATA, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """Describes the progress of a long-running ``LongRunningRecognize`` - call. It is included in the ``metadata`` field of the ``Operation`` - returned by the ``GetOperation`` call of the - ``google::longrunning::Operations`` service. - - Attributes: - progress_percent: - Approximate percentage of audio processed thus far. Guaranteed - to be 100 when the audio is fully processed and the results - are available. - start_time: - Time when the request was received. - last_update_time: - Time of the most recent processing update. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.LongRunningRecognizeMetadata) - }, -) -_sym_db.RegisterMessage(LongRunningRecognizeMetadata) - -StreamingRecognizeResponse = _reflection.GeneratedProtocolMessageType( - "StreamingRecognizeResponse", - (_message.Message,), - { - "DESCRIPTOR": _STREAMINGRECOGNIZERESPONSE, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """\ ``StreamingRecognizeResponse`` is the only message returned to the - client by ``StreamingRecognize``. A series of zero or more - ``StreamingRecognizeResponse`` messages are streamed back to the - client. If there is no recognizable audio, and ``single_utterance`` is - set to false, then no messages are streamed back to the client. - Here’s an example of a series of ten ``StreamingRecognizeResponse``\ s - that might be returned while processing audio: 1. results { - alternatives { transcript: “tube” } stability: 0.01 } 2. results { - alternatives { transcript: “to be a” } stability: 0.01 } 3. results { - alternatives { transcript: “to be” } stability: 0.9 } results { - alternatives { transcript: " or not to be" } stability: 0.01 } 4. - results { alternatives { transcript: “to be or not to be” confidence: - 0.92 } alternatives { transcript: “to bee or not to bee” } is_final: - true } 5. results { alternatives { transcript: " that’s" } stability: - 0.01 } 6. results { alternatives { transcript: " that is" } - stability: 0.9 } results { alternatives { transcript: " the - question" } stability: 0.01 } 7. 
results { alternatives { - transcript: " that is the question" confidence: 0.98 } alternatives - { transcript: " that was the question" } is_final: true } Notes: - - Only two of the above responses #4 and #7 contain final results; - they are indicated by ``is_final: true``. Concatenating these - together generates the full transcript: “to be or not to be that is - the question”. - The others contain interim ``results``. #3 and - #6 contain two interim ``results``: the first portion has a high - stability and is less likely to change; the second portion has a - low stability and is very likely to change. A UI designer might - choose to show only high stability ``results``. - The specific - ``stability`` and ``confidence`` values shown above are only for - illustrative purposes. Actual values may vary. - In each response, - only one of these fields will be set: ``error``, - ``speech_event_type``, or one or more (repeated) ``results``. - - Attributes: - error: - If set, returns a [google.rpc.Status][google.rpc.Status] - message that specifies the error for the operation. - results: - This repeated list contains zero or more results that - correspond to consecutive portions of the audio currently - being processed. It contains zero or one ``is_final=true`` - result (the newly settled portion), followed by zero or more - ``is_final=false`` results (the interim results). - speech_event_type: - Indicates the type of speech event. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.StreamingRecognizeResponse) - }, -) -_sym_db.RegisterMessage(StreamingRecognizeResponse) - -StreamingRecognitionResult = _reflection.GeneratedProtocolMessageType( - "StreamingRecognitionResult", - (_message.Message,), - { - "DESCRIPTOR": _STREAMINGRECOGNITIONRESULT, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """A streaming speech recognition result corresponding to a portion of - the audio that is currently being processed. - - Attributes: - alternatives: - May contain one or more recognition hypotheses (up to the - maximum specified in ``max_alternatives``). These alternatives - are ordered in terms of accuracy, with the top (first) - alternative being the most probable, as ranked by the - recognizer. - is_final: - If ``false``, this ``StreamingRecognitionResult`` represents - an interim result that may change. If ``true``, this is the - final time the speech service will return this particular - ``StreamingRecognitionResult``, the recognizer will not return - any further hypotheses for this portion of the transcript and - corresponding audio. - stability: - An estimate of the likelihood that the recognizer will not - change its guess about this interim result. Values range from - 0.0 (completely unstable) to 1.0 (completely stable). This - field is only provided for interim results - (``is_final=false``). The default of 0.0 is a sentinel value - indicating ``stability`` was not set. - result_end_time: - Time offset of the end of this result relative to the - beginning of the audio. - channel_tag: - For multi-channel audio, this is the channel number - corresponding to the recognized result for the audio from that - channel. For audio_channel_count = N, its output values can - range from ‘1’ to ‘N’. - language_code: - The `BCP-47 `__ - language tag of the language in this result. This language - code was detected to have the most likelihood of being spoken - in the audio. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.StreamingRecognitionResult) - }, -) -_sym_db.RegisterMessage(StreamingRecognitionResult) - -SpeechRecognitionResult = _reflection.GeneratedProtocolMessageType( - "SpeechRecognitionResult", - (_message.Message,), - { - "DESCRIPTOR": _SPEECHRECOGNITIONRESULT, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """A speech recognition result corresponding to a portion of the audio. - - Attributes: - alternatives: - May contain one or more recognition hypotheses (up to the - maximum specified in ``max_alternatives``). These alternatives - are ordered in terms of accuracy, with the top (first) - alternative being the most probable, as ranked by the - recognizer. - channel_tag: - For multi-channel audio, this is the channel number - corresponding to the recognized result for the audio from that - channel. For audio_channel_count = N, its output values can - range from ‘1’ to ‘N’. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.SpeechRecognitionResult) - }, -) -_sym_db.RegisterMessage(SpeechRecognitionResult) - -SpeechRecognitionAlternative = _reflection.GeneratedProtocolMessageType( - "SpeechRecognitionAlternative", - (_message.Message,), - { - "DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """Alternative hypotheses (a.k.a. n-best list). - - Attributes: - transcript: - Transcript text representing the words that the user spoke. - confidence: - The confidence estimate between 0.0 and 1.0. A higher number - indicates an estimated greater likelihood that the recognized - words are correct. This field is set only for the top - alternative of a non-streaming result or, of a streaming - result where ``is_final=true``. This field is not guaranteed - to be accurate and users should not rely on it to be always - provided. The default of 0.0 is a sentinel value indicating - ``confidence`` was not set. - words: - A list of word-specific information for each recognized word. - Note: When ``enable_speaker_diarization`` is true, you will - see all the words from the beginning of the audio. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.SpeechRecognitionAlternative) - }, -) -_sym_db.RegisterMessage(SpeechRecognitionAlternative) - -WordInfo = _reflection.GeneratedProtocolMessageType( - "WordInfo", - (_message.Message,), - { - "DESCRIPTOR": _WORDINFO, - "__module__": "google.cloud.speech_v1.proto.cloud_speech_pb2", - "__doc__": """Word-specific information for recognized words. - - Attributes: - start_time: - Time offset relative to the beginning of the audio, and - corresponding to the start of the spoken word. This field is - only set if ``enable_word_time_offsets=true`` and only in the - top hypothesis. This is an experimental feature and the - accuracy of the time offset can vary. - end_time: - Time offset relative to the beginning of the audio, and - corresponding to the end of the spoken word. This field is - only set if ``enable_word_time_offsets=true`` and only in the - top hypothesis. This is an experimental feature and the - accuracy of the time offset can vary. - word: - The word corresponding to this set of information. - speaker_tag: - A distinct integer value is assigned for every speaker within - the audio. This field specifies which one of those speakers - was detected to have spoken this word. Value ranges from ‘1’ - to diarization_speaker_count. 
speaker_tag is set if - enable_speaker_diarization = ‘true’ and only in the top - alternative. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1.WordInfo) - }, -) -_sym_db.RegisterMessage(WordInfo) - - -DESCRIPTOR._options = None -_RECOGNIZEREQUEST.fields_by_name["config"]._options = None -_RECOGNIZEREQUEST.fields_by_name["audio"]._options = None -_LONGRUNNINGRECOGNIZEREQUEST.fields_by_name["config"]._options = None -_LONGRUNNINGRECOGNIZEREQUEST.fields_by_name["audio"]._options = None -_STREAMINGRECOGNITIONCONFIG.fields_by_name["config"]._options = None -_RECOGNITIONCONFIG.fields_by_name["language_code"]._options = None -_SPEAKERDIARIZATIONCONFIG.fields_by_name["speaker_tag"]._options = None -_STREAMINGRECOGNITIONRESULT.fields_by_name["language_code"]._options = None -_WORDINFO.fields_by_name["speaker_tag"]._options = None - -_SPEECH = _descriptor.ServiceDescriptor( - name="Speech", - full_name="google.cloud.speech.v1.Speech", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\025speech.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=4262, - serialized_end=4855, - methods=[ - _descriptor.MethodDescriptor( - name="Recognize", - full_name="google.cloud.speech.v1.Speech.Recognize", - index=0, - containing_service=None, - input_type=_RECOGNIZEREQUEST, - output_type=_RECOGNIZERESPONSE, - serialized_options=b'\202\323\344\223\002\031"\024/v1/speech:recognize:\001*\332A\014config,audio', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="LongRunningRecognize", - full_name="google.cloud.speech.v1.Speech.LongRunningRecognize", - index=1, - containing_service=None, - input_type=_LONGRUNNINGRECOGNIZEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002$"\037/v1/speech:longrunningrecognize:\001*\332A\014config,audio\312A<\n\034LongRunningRecognizeResponse\022\034LongRunningRecognizeMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="StreamingRecognize", - full_name="google.cloud.speech.v1.Speech.StreamingRecognize", - index=2, - containing_service=None, - input_type=_STREAMINGRECOGNIZEREQUEST, - output_type=_STREAMINGRECOGNIZERESPONSE, - serialized_options=None, - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_SPEECH) - -DESCRIPTOR.services_by_name["Speech"] = _SPEECH - -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/proto/cloud_speech_pb2_grpc.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/proto/cloud_speech_pb2_grpc.py deleted file mode 100644 index e56beb6de3b6..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1/proto/cloud_speech_pb2_grpc.py +++ /dev/null @@ -1,181 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.speech_v1.proto import ( - cloud_speech_pb2 as google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) - - -class SpeechStub(object): - """Service that implements Google Cloud Speech API. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.Recognize = channel.unary_unary( - "/google.cloud.speech.v1.Speech/Recognize", - request_serializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.RecognizeRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.RecognizeResponse.FromString, - ) - self.LongRunningRecognize = channel.unary_unary( - "/google.cloud.speech.v1.Speech/LongRunningRecognize", - request_serializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.LongRunningRecognizeRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.StreamingRecognize = channel.stream_stream( - "/google.cloud.speech.v1.Speech/StreamingRecognize", - request_serializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeResponse.FromString, - ) - - -class SpeechServicer(object): - """Service that implements Google Cloud Speech API. - """ - - def Recognize(self, request, context): - """Performs synchronous speech recognition: receive results after all audio - has been sent and processed. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def LongRunningRecognize(self, request, context): - """Performs asynchronous speech recognition: receive results via the - google.longrunning.Operations interface. Returns either an - `Operation.error` or an `Operation.response` which contains - a `LongRunningRecognizeResponse` message. - For more information on asynchronous speech recognition, see the - [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def StreamingRecognize(self, request_iterator, context): - """Performs bidirectional streaming speech recognition: receive results while - sending audio. This method is only available via the gRPC API (not REST). 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_SpeechServicer_to_server(servicer, server): - rpc_method_handlers = { - "Recognize": grpc.unary_unary_rpc_method_handler( - servicer.Recognize, - request_deserializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.RecognizeRequest.FromString, - response_serializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.RecognizeResponse.SerializeToString, - ), - "LongRunningRecognize": grpc.unary_unary_rpc_method_handler( - servicer.LongRunningRecognize, - request_deserializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.LongRunningRecognizeRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "StreamingRecognize": grpc.stream_stream_rpc_method_handler( - servicer.StreamingRecognize, - request_deserializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeRequest.FromString, - response_serializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.cloud.speech.v1.Speech", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class Speech(object): - """Service that implements Google Cloud Speech API. - """ - - @staticmethod - def Recognize( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.speech.v1.Speech/Recognize", - google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.RecognizeRequest.SerializeToString, - google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.RecognizeResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def LongRunningRecognize( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.speech.v1.Speech/LongRunningRecognize", - google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.LongRunningRecognizeRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def StreamingRecognize( - request_iterator, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.stream_stream( - request_iterator, - target, - "/google.cloud.speech.v1.Speech/StreamingRecognize", - google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeRequest.SerializeToString, - google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git 
a/packages/google-cloud-python-speech/google/cloud/speech_v1/py.typed b/packages/google-cloud-python-speech/google/cloud/speech_v1/py.typed new file mode 100644 index 000000000000..02081c09b9ab --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-speech package uses inline types. diff --git a/packages/google-cloud-python-speech/google/cloud/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/__init__.py similarity index 66% rename from packages/google-cloud-python-speech/google/cloud/__init__.py rename to packages/google-cloud-python-speech/google/cloud/speech_v1/services/__init__.py index dd3a9f485275..42ffdf2bc43d 100644 --- a/packages/google-cloud-python-speech/google/cloud/__init__.py +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/__init__.py @@ -1,22 +1,16 @@ -# Copyright 2018 Google LLC +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) +# diff --git a/packages/google-cloud-python-speech/google/cloud/speech.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/__init__.py similarity index 70% rename from packages/google-cloud-python-speech/google/cloud/speech.py rename to packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/__init__.py index 244d2cd79f71..72f816da4255 100644 --- a/packages/google-cloud-python-speech/google/cloud/speech.py +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/__init__.py @@ -1,29 +1,24 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# - -from __future__ import absolute_import - -from google.cloud.speech_v1 import SpeechClient -from google.cloud.speech_v1 import enums -from google.cloud.speech_v1 import types - +from .client import SpeechClient +from .async_client import SpeechAsyncClient __all__ = ( - "enums", - "types", "SpeechClient", + "SpeechAsyncClient", ) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/async_client.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/async_client.py new file mode 100644 index 000000000000..debb4a40804d --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/async_client.py @@ -0,0 +1,408 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, AsyncIterable, AsyncIterator, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.speech_v1.types import cloud_speech +from google.rpc import status_pb2 as status # type: ignore + +from .transports.base import SpeechTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import SpeechGrpcAsyncIOTransport +from .client import SpeechClient + + +class SpeechAsyncClient: + """Service that implements Google Cloud Speech API.""" + + _client: SpeechClient + + DEFAULT_ENDPOINT = SpeechClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = SpeechClient.DEFAULT_MTLS_ENDPOINT + + from_service_account_file = SpeechClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(SpeechClient).get_transport_class, type(SpeechClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, SpeechTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the speech client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.SpeechTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. 
It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = SpeechClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def recognize( + self, + request: cloud_speech.RecognizeRequest = None, + *, + config: cloud_speech.RecognitionConfig = None, + audio: cloud_speech.RecognitionAudio = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cloud_speech.RecognizeResponse: + r"""Performs synchronous speech recognition: receive + results after all audio has been sent and processed. + + Args: + request (:class:`~.cloud_speech.RecognizeRequest`): + The request object. The top-level message sent by the + client for the `Recognize` method. + config (:class:`~.cloud_speech.RecognitionConfig`): + Required. Provides information to the + recognizer that specifies how to process + the request. + This corresponds to the ``config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + audio (:class:`~.cloud_speech.RecognitionAudio`): + Required. The audio data to be + recognized. + This corresponds to the ``audio`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cloud_speech.RecognizeResponse: + The only message returned to the client by the + ``Recognize`` method. It contains the result as zero or + more sequential ``SpeechRecognitionResult`` messages. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([config, audio]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cloud_speech.RecognizeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if config is not None: + request.config = config + if audio is not None: + request.audio = audio + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.recognize, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=5000.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def long_running_recognize( + self, + request: cloud_speech.LongRunningRecognizeRequest = None, + *, + config: cloud_speech.RecognitionConfig = None, + audio: cloud_speech.RecognitionAudio = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Performs asynchronous speech recognition: receive results via + the google.longrunning.Operations interface. Returns either an + ``Operation.error`` or an ``Operation.response`` which contains + a ``LongRunningRecognizeResponse`` message. For more information + on asynchronous speech recognition, see the + `how-to `__. + + Args: + request (:class:`~.cloud_speech.LongRunningRecognizeRequest`): + The request object. The top-level message sent by the + client for the `LongRunningRecognize` method. + config (:class:`~.cloud_speech.RecognitionConfig`): + Required. Provides information to the + recognizer that specifies how to process + the request. + This corresponds to the ``config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + audio (:class:`~.cloud_speech.RecognitionAudio`): + Required. The audio data to be + recognized. + This corresponds to the ``audio`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.cloud_speech.LongRunningRecognizeResponse``: + The only message returned to the client by the + ``LongRunningRecognize`` method. It contains the result + as zero or more sequential ``SpeechRecognitionResult`` + messages. It is included in the ``result.response`` + field of the ``Operation`` returned by the + ``GetOperation`` call of the + ``google::longrunning::Operations`` service. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([config, audio]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cloud_speech.LongRunningRecognizeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if config is not None: + request.config = config + if audio is not None: + request.audio = audio + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
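Editor's note: the `request`-versus-flattened-arguments contract documented above applies to both the sync and async clients. A usage sketch against the sync client, assuming application default credentials and a hypothetical `gs://` URI:

    from google.cloud import speech_v1

    client = speech_v1.SpeechClient()
    config = speech_v1.RecognitionConfig(
        encoding=speech_v1.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code="en-US",
    )
    audio = speech_v1.RecognitionAudio(uri="gs://my-bucket/audio.raw")

    # Either pass a request object (or dict) ...
    response = client.recognize(request={"config": config, "audio": audio})
    # ... or the flattened fields; mixing the two raises ValueError.
    response = client.recognize(config=config, audio=audio)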
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.long_running_recognize, + default_timeout=5000.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + cloud_speech.LongRunningRecognizeResponse, + metadata_type=cloud_speech.LongRunningRecognizeMetadata, + ) + + # Done; return the response. + return response + + def streaming_recognize( + self, + requests: AsyncIterator[cloud_speech.StreamingRecognizeRequest] = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> AsyncIterable[cloud_speech.StreamingRecognizeResponse]: + r"""Performs bidirectional streaming speech recognition: + receive results while sending audio. This method is only + available via the gRPC API (not REST). + + Args: + requests (AsyncIterator[`~.cloud_speech.StreamingRecognizeRequest`]): + The request object AsyncIterator. The top-level message sent by the + client for the `StreamingRecognize` method. Multiple + `StreamingRecognizeRequest` messages are sent. The first + message must contain a `streaming_config` message and + must not contain `audio_content`. All subsequent + messages must contain `audio_content` and must not + contain a `streaming_config` message. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[~.cloud_speech.StreamingRecognizeResponse]: + ``StreamingRecognizeResponse`` is the only message + returned to the client by ``StreamingRecognize``. A + series of zero or more ``StreamingRecognizeResponse`` + messages are streamed back to the client. If there is no + recognizable audio, and ``single_utterance`` is set to + false, then no messages are streamed back to the client. + + Here's an example of a series of ten + ``StreamingRecognizeResponse``\ s that might be returned + while processing audio: + + 1. results { alternatives { transcript: "tube" } + stability: 0.01 } + + 2. results { alternatives { transcript: "to be a" } + stability: 0.01 } + + 3. results { alternatives { transcript: "to be" } + stability: 0.9 } results { alternatives { transcript: + " or not to be" } stability: 0.01 } + + 4. results { alternatives { transcript: "to be or not to + be" confidence: 0.92 } alternatives { transcript: "to + bee or not to bee" } is_final: true } + + 5. results { alternatives { transcript: " that's" } + stability: 0.01 } + + 6. results { alternatives { transcript: " that is" } + stability: 0.9 } results { alternatives { transcript: + " the question" } stability: 0.01 } + + 7. results { alternatives { transcript: " that is the + question" confidence: 0.98 } alternatives { + transcript: " that was the question" } is_final: true + } + + Notes: + + - Only two of the above responses #4 and #7 contain + final results; they are indicated by + ``is_final: true``. Concatenating these together + generates the full transcript: "to be or not to be + that is the question". + + - The others contain interim ``results``. 
#3 and #6 + contain two interim ``results``: the first portion + has a high stability and is less likely to change; + the second portion has a low stability and is very + likely to change. A UI designer might choose to show + only high stability ``results``. + + - The specific ``stability`` and ``confidence`` values + shown above are only for illustrative purposes. + Actual values may vary. + + - In each response, only one of these fields will be + set: ``error``, ``speech_event_type``, or one or more + (repeated) ``results``. + + """ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.streaming_recognize, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=5000.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-speech",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("SpeechAsyncClient",) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/client.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/client.py new file mode 100644 index 000000000000..43bc479a0f2c --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/client.py @@ -0,0 +1,548 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
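Editor's note: the message ordering spelled out in the `streaming_recognize` notes above (first request carries only `streaming_config`, every later request only `audio_content`) translates to user code roughly as follows. A sketch using the generated sync client, with `audio_chunks` standing in for any iterable of `bytes`:

    from google.cloud.speech_v1.services.speech import SpeechClient
    from google.cloud.speech_v1.types import cloud_speech

    client = SpeechClient()

    def requests(chunks):
        # First message: configuration only, no audio_content.
        yield cloud_speech.StreamingRecognizeRequest(
            streaming_config=cloud_speech.StreamingRecognitionConfig(
                config=cloud_speech.RecognitionConfig(language_code="en-US"),
            )
        )
        # All subsequent messages: audio_content only.
        for chunk in chunks:
            yield cloud_speech.StreamingRecognizeRequest(audio_content=chunk)

    for response in client.streaming_recognize(requests=requests(audio_chunks)):
        for result in response.results:
            print(result.alternatives[0].transcript)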
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import (
+    Callable,
+    Dict,
+    Optional,
+    Iterable,
+    Iterator,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+)
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.api_core import operation  # type: ignore
+from google.api_core import operation_async  # type: ignore
+from google.cloud.speech_v1.types import cloud_speech
+from google.rpc import status_pb2 as status  # type: ignore
+
+from .transports.base import SpeechTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import SpeechGrpcTransport
+from .transports.grpc_asyncio import SpeechGrpcAsyncIOTransport
+
+
+class SpeechClientMeta(type):
+    """Metaclass for the Speech client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[SpeechTransport]]
+    _transport_registry["grpc"] = SpeechGrpcTransport
+    _transport_registry["grpc_asyncio"] = SpeechGrpcAsyncIOTransport
+
+    def get_transport_class(cls, label: str = None,) -> Type[SpeechTransport]:
+        """Return an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class SpeechClient(metaclass=SpeechClientMeta):
+    """Service that implements Google Cloud Speech API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "speech.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+ + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, SpeechTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the speech client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.SpeechTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (client_options_lib.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, SpeechTransport): + # transport is a SpeechTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def recognize( + self, + request: cloud_speech.RecognizeRequest = None, + *, + config: cloud_speech.RecognitionConfig = None, + audio: cloud_speech.RecognitionAudio = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cloud_speech.RecognizeResponse: + r"""Performs synchronous speech recognition: receive + results after all audio has been sent and processed. + + Args: + request (:class:`~.cloud_speech.RecognizeRequest`): + The request object. The top-level message sent by the + client for the `Recognize` method. + config (:class:`~.cloud_speech.RecognitionConfig`): + Required. Provides information to the + recognizer that specifies how to process + the request. + This corresponds to the ``config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + audio (:class:`~.cloud_speech.RecognitionAudio`): + Required. The audio data to be + recognized. + This corresponds to the ``audio`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cloud_speech.RecognizeResponse: + The only message returned to the client by the + ``Recognize`` method. It contains the result as zero or + more sequential ``SpeechRecognitionResult`` messages. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([config, audio]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a cloud_speech.RecognizeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cloud_speech.RecognizeRequest): + request = cloud_speech.RecognizeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if config is not None: + request.config = config + if audio is not None: + request.audio = audio + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.recognize] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def long_running_recognize( + self, + request: cloud_speech.LongRunningRecognizeRequest = None, + *, + config: cloud_speech.RecognitionConfig = None, + audio: cloud_speech.RecognitionAudio = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Performs asynchronous speech recognition: receive results via + the google.longrunning.Operations interface. Returns either an + ``Operation.error`` or an ``Operation.response`` which contains + a ``LongRunningRecognizeResponse`` message. For more information + on asynchronous speech recognition, see the + `how-to `__. + + Args: + request (:class:`~.cloud_speech.LongRunningRecognizeRequest`): + The request object. The top-level message sent by the + client for the `LongRunningRecognize` method. + config (:class:`~.cloud_speech.RecognitionConfig`): + Required. Provides information to the + recognizer that specifies how to process + the request. + This corresponds to the ``config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + audio (:class:`~.cloud_speech.RecognitionAudio`): + Required. The audio data to be + recognized. + This corresponds to the ``audio`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.cloud_speech.LongRunningRecognizeResponse``: + The only message returned to the client by the + ``LongRunningRecognize`` method. It contains the result + as zero or more sequential ``SpeechRecognitionResult`` + messages. It is included in the ``result.response`` + field of the ``Operation`` returned by the + ``GetOperation`` call of the + ``google::longrunning::Operations`` service. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([config, audio]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a cloud_speech.LongRunningRecognizeRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cloud_speech.LongRunningRecognizeRequest): + request = cloud_speech.LongRunningRecognizeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if config is not None: + request.config = config + if audio is not None: + request.audio = audio + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.long_running_recognize] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + cloud_speech.LongRunningRecognizeResponse, + metadata_type=cloud_speech.LongRunningRecognizeMetadata, + ) + + # Done; return the response. + return response + + def streaming_recognize( + self, + requests: Iterator[cloud_speech.StreamingRecognizeRequest] = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[cloud_speech.StreamingRecognizeResponse]: + r"""Performs bidirectional streaming speech recognition: + receive results while sending audio. This method is only + available via the gRPC API (not REST). + + Args: + requests (Iterator[`~.cloud_speech.StreamingRecognizeRequest`]): + The request object iterator. The top-level message sent by the + client for the `StreamingRecognize` method. Multiple + `StreamingRecognizeRequest` messages are sent. The first + message must contain a `streaming_config` message and + must not contain `audio_content`. All subsequent + messages must contain `audio_content` and must not + contain a `streaming_config` message. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[~.cloud_speech.StreamingRecognizeResponse]: + ``StreamingRecognizeResponse`` is the only message + returned to the client by ``StreamingRecognize``. A + series of zero or more ``StreamingRecognizeResponse`` + messages are streamed back to the client. If there is no + recognizable audio, and ``single_utterance`` is set to + false, then no messages are streamed back to the client. + + Here's an example of a series of ten + ``StreamingRecognizeResponse``\ s that might be returned + while processing audio: + + 1. results { alternatives { transcript: "tube" } + stability: 0.01 } + + 2. results { alternatives { transcript: "to be a" } + stability: 0.01 } + + 3. results { alternatives { transcript: "to be" } + stability: 0.9 } results { alternatives { transcript: + " or not to be" } stability: 0.01 } + + 4. results { alternatives { transcript: "to be or not to + be" confidence: 0.92 } alternatives { transcript: "to + bee or not to bee" } is_final: true } + + 5. results { alternatives { transcript: " that's" } + stability: 0.01 } + + 6. results { alternatives { transcript: " that is" } + stability: 0.9 } results { alternatives { transcript: + " the question" } stability: 0.01 } + + 7. 
results { alternatives { transcript: " that is the + question" confidence: 0.98 } alternatives { + transcript: " that was the question" } is_final: true + } + + Notes: + + - Only two of the above responses #4 and #7 contain + final results; they are indicated by + ``is_final: true``. Concatenating these together + generates the full transcript: "to be or not to be + that is the question". + + - The others contain interim ``results``. #3 and #6 + contain two interim ``results``: the first portion + has a high stability and is less likely to change; + the second portion has a low stability and is very + likely to change. A UI designer might choose to show + only high stability ``results``. + + - The specific ``stability`` and ``confidence`` values + shown above are only for illustrative purposes. + Actual values may vary. + + - In each response, only one of these fields will be + set: ``error``, ``speech_event_type``, or one or more + (repeated) ``results``. + + """ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.streaming_recognize] + + # Send the request. + response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-speech",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("SpeechClient",) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/__init__.py new file mode 100644 index 000000000000..3ec5f07eb105 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import SpeechTransport +from .grpc import SpeechGrpcTransport +from .grpc_asyncio import SpeechGrpcAsyncIOTransport + + +# Compile a registry of transports. 
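Editor's note: the registry compiled below is what `SpeechClientMeta.get_transport_class` consults; each label doubles as the string accepted by the clients' `transport` argument. A sketch:

    from google.cloud.speech_v1.services.speech import SpeechClient

    # "grpc" is the first (default) entry; "grpc_asyncio" backs SpeechAsyncClient.
    transport_cls = SpeechClient.get_transport_class("grpc")

    # Equivalent to letting the client resolve the label itself.
    client = SpeechClient(transport="grpc")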
+_transport_registry = OrderedDict() # type: Dict[str, Type[SpeechTransport]] +_transport_registry["grpc"] = SpeechGrpcTransport +_transport_registry["grpc_asyncio"] = SpeechGrpcAsyncIOTransport + + +__all__ = ( + "SpeechTransport", + "SpeechGrpcTransport", + "SpeechGrpcAsyncIOTransport", +) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/base.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/base.py new file mode 100644 index 000000000000..a4adf0805c58 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/base.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.speech_v1.types import cloud_speech +from google.longrunning import operations_pb2 as operations # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-speech",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class SpeechTransport(abc.ABC): + """Abstract transport class for Speech.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "speech.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
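Editor's note: the per-method defaults installed by `_prep_wrapped_messages` (further below) can be overridden per call through the `retry` and `timeout` parameters that every generated method accepts. A sketch, with a hypothetical bucket URI:

    from google.api_core import exceptions
    from google.api_core import retry as retries
    from google.cloud import speech_v1

    client = speech_v1.SpeechClient()

    # Replace the generated policy (initial=0.1, maximum=60.0, multiplier=1.3)
    # with a more patient one that only retries on ServiceUnavailable.
    custom_retry = retries.Retry(
        initial=0.5,
        maximum=30.0,
        multiplier=2.0,
        predicate=retries.if_exception_type(exceptions.ServiceUnavailable),
    )

    response = client.recognize(
        request={
            "config": {"language_code": "en-US"},
            "audio": {"uri": "gs://my-bucket/audio.flac"},
        },
        retry=custom_retry,
        timeout=120.0,
    )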
+ Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.recognize: gapic_v1.method.wrap_method( + self.recognize, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=5000.0, + client_info=client_info, + ), + self.long_running_recognize: gapic_v1.method.wrap_method( + self.long_running_recognize, + default_timeout=5000.0, + client_info=client_info, + ), + self.streaming_recognize: gapic_v1.method.wrap_method( + self.streaming_recognize, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=5000.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def recognize( + self, + ) -> typing.Callable[ + [cloud_speech.RecognizeRequest], + typing.Union[ + cloud_speech.RecognizeResponse, + typing.Awaitable[cloud_speech.RecognizeResponse], + ], + ]: + raise NotImplementedError() + + @property + def long_running_recognize( + self, + ) -> typing.Callable[ + [cloud_speech.LongRunningRecognizeRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def streaming_recognize( + self, + ) -> typing.Callable[ + [cloud_speech.StreamingRecognizeRequest], + typing.Union[ + cloud_speech.StreamingRecognizeResponse, + typing.Awaitable[cloud_speech.StreamingRecognizeResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("SpeechTransport",) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/grpc.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/grpc.py new file mode 100644 index 000000000000..90ba7ead3b85 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/grpc.py @@ -0,0 +1,340 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.speech_v1.types import cloud_speech +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import SpeechTransport, DEFAULT_CLIENT_INFO + + +class SpeechGrpcTransport(SpeechTransport): + """gRPC backend transport for Speech. + + Service that implements Google Cloud Speech API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "speech.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + @classmethod + def create_channel( + cls, + host: str = "speech.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def recognize( + self, + ) -> Callable[[cloud_speech.RecognizeRequest], cloud_speech.RecognizeResponse]: + r"""Return a callable for the recognize method over gRPC. + + Performs synchronous speech recognition: receive + results after all audio has been sent and processed. + + Returns: + Callable[[~.RecognizeRequest], + ~.RecognizeResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "recognize" not in self._stubs: + self._stubs["recognize"] = self.grpc_channel.unary_unary( + "/google.cloud.speech.v1.Speech/Recognize", + request_serializer=cloud_speech.RecognizeRequest.serialize, + response_deserializer=cloud_speech.RecognizeResponse.deserialize, + ) + return self._stubs["recognize"] + + @property + def long_running_recognize( + self, + ) -> Callable[[cloud_speech.LongRunningRecognizeRequest], operations.Operation]: + r"""Return a callable for the long running recognize method over gRPC. + + Performs asynchronous speech recognition: receive results via + the google.longrunning.Operations interface. Returns either an + ``Operation.error`` or an ``Operation.response`` which contains + a ``LongRunningRecognizeResponse`` message. For more information + on asynchronous speech recognition, see the + `how-to `__. + + Returns: + Callable[[~.LongRunningRecognizeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "long_running_recognize" not in self._stubs: + self._stubs["long_running_recognize"] = self.grpc_channel.unary_unary( + "/google.cloud.speech.v1.Speech/LongRunningRecognize", + request_serializer=cloud_speech.LongRunningRecognizeRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["long_running_recognize"] + + @property + def streaming_recognize( + self, + ) -> Callable[ + [cloud_speech.StreamingRecognizeRequest], + cloud_speech.StreamingRecognizeResponse, + ]: + r"""Return a callable for the streaming recognize method over gRPC. + + Performs bidirectional streaming speech recognition: + receive results while sending audio. This method is only + available via the gRPC API (not REST). + + Returns: + Callable[[~.StreamingRecognizeRequest], + ~.StreamingRecognizeResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "streaming_recognize" not in self._stubs: + self._stubs["streaming_recognize"] = self.grpc_channel.stream_stream( + "/google.cloud.speech.v1.Speech/StreamingRecognize", + request_serializer=cloud_speech.StreamingRecognizeRequest.serialize, + response_deserializer=cloud_speech.StreamingRecognizeResponse.deserialize, + ) + return self._stubs["streaming_recognize"] + + +__all__ = ("SpeechGrpcTransport",) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/grpc_asyncio.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/grpc_asyncio.py new file mode 100644 index 000000000000..4ec87eb70b02 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/services/speech/transports/grpc_asyncio.py @@ -0,0 +1,344 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.speech_v1.types import cloud_speech +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import SpeechTransport, DEFAULT_CLIENT_INFO +from .grpc import SpeechGrpcTransport + + +class SpeechGrpcAsyncIOTransport(SpeechTransport): + """gRPC AsyncIO backend transport for Speech. + + Service that implements Google Cloud Speech API. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "speech.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "speech.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. 
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. 
+ if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def recognize( + self, + ) -> Callable[ + [cloud_speech.RecognizeRequest], Awaitable[cloud_speech.RecognizeResponse] + ]: + r"""Return a callable for the recognize method over gRPC. + + Performs synchronous speech recognition: receive + results after all audio has been sent and processed. + + Returns: + Callable[[~.RecognizeRequest], + Awaitable[~.RecognizeResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "recognize" not in self._stubs: + self._stubs["recognize"] = self.grpc_channel.unary_unary( + "/google.cloud.speech.v1.Speech/Recognize", + request_serializer=cloud_speech.RecognizeRequest.serialize, + response_deserializer=cloud_speech.RecognizeResponse.deserialize, + ) + return self._stubs["recognize"] + + @property + def long_running_recognize( + self, + ) -> Callable[ + [cloud_speech.LongRunningRecognizeRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the long running recognize method over gRPC. + + Performs asynchronous speech recognition: receive results via + the google.longrunning.Operations interface. Returns either an + ``Operation.error`` or an ``Operation.response`` which contains + a ``LongRunningRecognizeResponse`` message. For more information + on asynchronous speech recognition, see the + `how-to `__. + + Returns: + Callable[[~.LongRunningRecognizeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "long_running_recognize" not in self._stubs: + self._stubs["long_running_recognize"] = self.grpc_channel.unary_unary( + "/google.cloud.speech.v1.Speech/LongRunningRecognize", + request_serializer=cloud_speech.LongRunningRecognizeRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["long_running_recognize"] + + @property + def streaming_recognize( + self, + ) -> Callable[ + [cloud_speech.StreamingRecognizeRequest], + Awaitable[cloud_speech.StreamingRecognizeResponse], + ]: + r"""Return a callable for the streaming recognize method over gRPC. + + Performs bidirectional streaming speech recognition: + receive results while sending audio. This method is only + available via the gRPC API (not REST). + + Returns: + Callable[[~.StreamingRecognizeRequest], + Awaitable[~.StreamingRecognizeResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "streaming_recognize" not in self._stubs: + self._stubs["streaming_recognize"] = self.grpc_channel.stream_stream( + "/google.cloud.speech.v1.Speech/StreamingRecognize", + request_serializer=cloud_speech.StreamingRecognizeRequest.serialize, + response_deserializer=cloud_speech.StreamingRecognizeResponse.deserialize, + ) + return self._stubs["streaming_recognize"] + + +__all__ = ("SpeechGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/types.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/types.py deleted file mode 100644 index 2f486e2500db..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1/types.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.speech_v1.proto import cloud_speech_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - operations_pb2, - any_pb2, - duration_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [ - cloud_speech_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.speech_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/types/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/types/__init__.py new file mode 100644 index 000000000000..19eeeafdca94 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/types/__init__.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .cloud_speech import ( + RecognizeRequest, + LongRunningRecognizeRequest, + StreamingRecognizeRequest, + StreamingRecognitionConfig, + RecognitionConfig, + SpeakerDiarizationConfig, + RecognitionMetadata, + SpeechContext, + RecognitionAudio, + RecognizeResponse, + LongRunningRecognizeResponse, + LongRunningRecognizeMetadata, + StreamingRecognizeResponse, + StreamingRecognitionResult, + SpeechRecognitionResult, + SpeechRecognitionAlternative, + WordInfo, +) + + +__all__ = ( + "RecognizeRequest", + "LongRunningRecognizeRequest", + "StreamingRecognizeRequest", + "StreamingRecognitionConfig", + "RecognitionConfig", + "SpeakerDiarizationConfig", + "RecognitionMetadata", + "SpeechContext", + "RecognitionAudio", + "RecognizeResponse", + "LongRunningRecognizeResponse", + "LongRunningRecognizeMetadata", + "StreamingRecognizeResponse", + "StreamingRecognitionResult", + "SpeechRecognitionResult", + "SpeechRecognitionAlternative", + "WordInfo", +) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1/types/cloud_speech.py b/packages/google-cloud-python-speech/google/cloud/speech_v1/types/cloud_speech.py new file mode 100644 index 000000000000..0687b3771535 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1/types/cloud_speech.py @@ -0,0 +1,850 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.rpc import status_pb2 as status # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.speech.v1", + manifest={ + "RecognizeRequest", + "LongRunningRecognizeRequest", + "StreamingRecognizeRequest", + "StreamingRecognitionConfig", + "RecognitionConfig", + "SpeakerDiarizationConfig", + "RecognitionMetadata", + "SpeechContext", + "RecognitionAudio", + "RecognizeResponse", + "LongRunningRecognizeResponse", + "LongRunningRecognizeMetadata", + "StreamingRecognizeResponse", + "StreamingRecognitionResult", + "SpeechRecognitionResult", + "SpeechRecognitionAlternative", + "WordInfo", + }, +) + + +class RecognizeRequest(proto.Message): + r"""The top-level message sent by the client for the ``Recognize`` + method. + + Attributes: + config (~.cloud_speech.RecognitionConfig): + Required. Provides information to the + recognizer that specifies how to process the + request. + audio (~.cloud_speech.RecognitionAudio): + Required. The audio data to be recognized. + """ + + config = proto.Field(proto.MESSAGE, number=1, message="RecognitionConfig",) + + audio = proto.Field(proto.MESSAGE, number=2, message="RecognitionAudio",) + + +class LongRunningRecognizeRequest(proto.Message): + r"""The top-level message sent by the client for the + ``LongRunningRecognize`` method. + + Attributes: + config (~.cloud_speech.RecognitionConfig): + Required. 
Provides information to the + recognizer that specifies how to process the + request. + audio (~.cloud_speech.RecognitionAudio): + Required. The audio data to be recognized. + """ + + config = proto.Field(proto.MESSAGE, number=1, message="RecognitionConfig",) + + audio = proto.Field(proto.MESSAGE, number=2, message="RecognitionAudio",) + + +class StreamingRecognizeRequest(proto.Message): + r"""The top-level message sent by the client for the + ``StreamingRecognize`` method. Multiple + ``StreamingRecognizeRequest`` messages are sent. The first message + must contain a ``streaming_config`` message and must not contain + ``audio_content``. All subsequent messages must contain + ``audio_content`` and must not contain a ``streaming_config`` + message. + + Attributes: + streaming_config (~.cloud_speech.StreamingRecognitionConfig): + Provides information to the recognizer that specifies how to + process the request. The first ``StreamingRecognizeRequest`` + message must contain a ``streaming_config`` message. + audio_content (bytes): + The audio data to be recognized. Sequential chunks of audio + data are sent in sequential ``StreamingRecognizeRequest`` + messages. The first ``StreamingRecognizeRequest`` message + must not contain ``audio_content`` data and all subsequent + ``StreamingRecognizeRequest`` messages must contain + ``audio_content`` data. The audio bytes must be encoded as + specified in ``RecognitionConfig``. Note: as with all bytes + fields, proto buffers use a pure binary representation (not + base64). See `content + limits `__. + """ + + streaming_config = proto.Field( + proto.MESSAGE, + number=1, + oneof="streaming_request", + message="StreamingRecognitionConfig", + ) + + audio_content = proto.Field(proto.BYTES, number=2, oneof="streaming_request") + + +class StreamingRecognitionConfig(proto.Message): + r"""Provides information to the recognizer that specifies how to + process the request. + + Attributes: + config (~.cloud_speech.RecognitionConfig): + Required. Provides information to the + recognizer that specifies how to process the + request. + single_utterance (bool): + If ``false`` or omitted, the recognizer will perform + continuous recognition (continuing to wait for and process + audio even if the user pauses speaking) until the client + closes the input stream (gRPC API) or until the maximum time + limit has been reached. May return multiple + ``StreamingRecognitionResult``\ s with the ``is_final`` flag + set to ``true``. + + If ``true``, the recognizer will detect a single spoken + utterance. When it detects that the user has paused or + stopped speaking, it will return an + ``END_OF_SINGLE_UTTERANCE`` event and cease recognition. It + will return no more than one ``StreamingRecognitionResult`` + with the ``is_final`` flag set to ``true``. + interim_results (bool): + If ``true``, interim results (tentative hypotheses) may be + returned as they become available (these interim results are + indicated with the ``is_final=false`` flag). If ``false`` or + omitted, only ``is_final=true`` result(s) are returned. + """ + + config = proto.Field(proto.MESSAGE, number=1, message="RecognitionConfig",) + + single_utterance = proto.Field(proto.BOOL, number=2) + + interim_results = proto.Field(proto.BOOL, number=3) + + +class RecognitionConfig(proto.Message): + r"""Provides information to the recognizer that specifies how to + process the request. 
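Tying the streaming message flow described above together: a hedged sketch of a request generator that sends the configuration first and audio-only messages afterwards (``audio_chunks`` and ``request_stream`` are illustrative names; the encoding and sample rate are placeholder values):

    from google.cloud import speech_v1

    def request_stream(audio_chunks):
        # First message: streaming_config only, no audio_content.
        yield speech_v1.StreamingRecognizeRequest(
            streaming_config=speech_v1.StreamingRecognitionConfig(
                config=speech_v1.RecognitionConfig(
                    encoding=speech_v1.RecognitionConfig.AudioEncoding.LINEAR16,
                    sample_rate_hertz=16000,
                    language_code="en-US",
                )
            )
        )
        # Every subsequent message: audio_content only.
        for chunk in audio_chunks:
            yield speech_v1.StreamingRecognizeRequest(audio_content=chunk)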
+ + Attributes: + encoding (~.cloud_speech.RecognitionConfig.AudioEncoding): + Encoding of audio data sent in all ``RecognitionAudio`` + messages. This field is optional for ``FLAC`` and ``WAV`` + audio files and required for all other audio formats. For + details, see + [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. + sample_rate_hertz (int): + Sample rate in Hertz of the audio data sent in all + ``RecognitionAudio`` messages. Valid values are: 8000-48000. + 16000 is optimal. For best results, set the sampling rate of + the audio source to 16000 Hz. If that's not possible, use + the native sample rate of the audio source (instead of + re-sampling). This field is optional for FLAC and WAV audio + files, but is required for all other audio formats. For + details, see + [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding]. + audio_channel_count (int): + The number of channels in the input audio data. ONLY set + this for MULTI-CHANNEL recognition. Valid values for + LINEAR16 and FLAC are ``1``-``8``. Valid values for OGG_OPUS + are '1'-'254'. Valid value for MULAW, AMR, AMR_WB and + SPEEX_WITH_HEADER_BYTE is only ``1``. If ``0`` or omitted, + defaults to one channel (mono). Note: We only recognize the + first channel by default. To perform independent recognition + on each channel set + ``enable_separate_recognition_per_channel`` to 'true'. + enable_separate_recognition_per_channel (bool): + This needs to be set to ``true`` explicitly and + ``audio_channel_count`` > 1 to get each channel recognized + separately. The recognition result will contain a + ``channel_tag`` field to state which channel that result + belongs to. If this is not true, we will only recognize the + first channel. The request is billed cumulatively for all + channels recognized: ``audio_channel_count`` multiplied by + the length of the audio. + language_code (str): + Required. The language of the supplied audio as a + `BCP-47 `__ + language tag. Example: "en-US". See `Language + Support `__ + for a list of the currently supported language codes. + max_alternatives (int): + Maximum number of recognition hypotheses to be returned. + Specifically, the maximum number of + ``SpeechRecognitionAlternative`` messages within each + ``SpeechRecognitionResult``. The server may return fewer + than ``max_alternatives``. Valid values are ``0``-``30``. A + value of ``0`` or ``1`` will return a maximum of one. If + omitted, will return a maximum of one. + profanity_filter (bool): + If set to ``true``, the server will attempt to filter out + profanities, replacing all but the initial character in each + filtered word with asterisks, e.g. "f***". If set to + ``false`` or omitted, profanities won't be filtered out. + speech_contexts (Sequence[~.cloud_speech.SpeechContext]): + Array of + [SpeechContext][google.cloud.speech.v1.SpeechContext]. A + means to provide context to assist the speech recognition. + For more information, see `speech + adaptation `__. + enable_word_time_offsets (bool): + If ``true``, the top result includes a list of words and the + start and end time offsets (timestamps) for those words. If + ``false``, no word-level time offset information is + returned. The default is ``false``. + enable_automatic_punctuation (bool): + If 'true', adds punctuation to recognition + result hypotheses. This feature is only + available in select languages. Setting this for + requests in other languages has no effect at + all. 
The default 'false' value does not add
+            punctuation to result hypotheses. Note: This is
+            currently offered as an experimental service,
+            complimentary to all users. In the future this
+            may be exclusively available as a premium
+            feature.
+        diarization_config (~.cloud_speech.SpeakerDiarizationConfig):
+            Config to enable speaker diarization and set
+            additional parameters to make diarization better
+            suited for your application. Note: When this is
+            enabled, we send all the words from the
+            beginning of the audio for the top alternative
+            in every consecutive STREAMING response. This
+            is done in order to improve our speaker tags as
+            our models learn to identify the speakers in the
+            conversation over time. For non-streaming
+            requests, the diarization results will be
+            provided only in the top alternative of the
+            FINAL SpeechRecognitionResult.
+        metadata (~.cloud_speech.RecognitionMetadata):
+            Metadata regarding this request.
+        model (str):
+            Which model to select for the given request. Select the
+            model best suited to your domain to get best results. If a
+            model is not explicitly specified, then we auto-select a
+            model based on the parameters in the RecognitionConfig.
+
+            .. raw:: html
+
+                <table>
+                  <tr>
+                    <td><b>Model</b></td>
+                    <td><b>Description</b></td>
+                  </tr>
+                  <tr>
+                    <td><code>command_and_search</code></td>
+                    <td>Best for short queries such as voice commands or
+                    voice search.</td>
+                  </tr>
+                  <tr>
+                    <td><code>phone_call</code></td>
+                    <td>Best for audio that originated from a phone call
+                    (typically recorded at an 8khz sampling rate).</td>
+                  </tr>
+                  <tr>
+                    <td><code>video</code></td>
+                    <td>Best for audio that originated from video or includes
+                    multiple speakers. Ideally the audio is recorded at a 16khz
+                    or greater sampling rate. This is a premium model that
+                    costs more than the standard rate.</td>
+                  </tr>
+                  <tr>
+                    <td><code>default</code></td>
+                    <td>Best for audio that is not one of the specific audio
+                    models. For example, long-form audio. Ideally the audio is
+                    high-fidelity, recorded at a 16khz or greater sampling
+                    rate.</td>
+                  </tr>
+                </table>
+
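A hedged sketch of selecting one of the identifiers from the table above (the encoding and sample rate are placeholder values):

    from google.cloud import speech_v1

    config = speech_v1.RecognitionConfig(
        encoding=speech_v1.RecognitionConfig.AudioEncoding.FLAC,
        sample_rate_hertz=16000,
        language_code="en-US",
        model="video",  # identifier taken from the table above
    )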
+        use_enhanced (bool):
+            Set to true to use an enhanced model for speech recognition.
+            If ``use_enhanced`` is set to true and the ``model`` field
+            is not set, then an appropriate enhanced model is chosen if
+            an enhanced model exists for the audio.
+
+            If ``use_enhanced`` is true and an enhanced version of the
+            specified model does not exist, then the speech is
+            recognized using the standard version of the specified
+            model.
+    """
+
+    class AudioEncoding(proto.Enum):
+        r"""The encoding of the audio data sent in the request.
+
+        All encodings support only 1 channel (mono) audio, unless the
+        ``audio_channel_count`` and
+        ``enable_separate_recognition_per_channel`` fields are set.
+
+        For best results, the audio source should be captured and
+        transmitted using a lossless encoding (``FLAC`` or ``LINEAR16``).
+        The accuracy of the speech recognition can be reduced if lossy
+        codecs are used to capture or transmit audio, particularly if
+        background noise is present. Lossy codecs include ``MULAW``,
+        ``AMR``, ``AMR_WB``, ``OGG_OPUS``, ``SPEEX_WITH_HEADER_BYTE``, and
+        ``MP3``.
+
+        The ``FLAC`` and ``WAV`` audio file formats include a header that
+        describes the included audio content. You can request recognition
+        for ``WAV`` files that contain either ``LINEAR16`` or ``MULAW``
+        encoded audio. If you send ``FLAC`` or ``WAV`` audio file format in
+        your request, you do not need to specify an ``AudioEncoding``; the
+        audio encoding format is determined from the file header. If you
+        specify an ``AudioEncoding`` when you send ``FLAC`` or ``WAV``
+        audio, the encoding configuration must match the encoding described
+        in the audio header; otherwise the request returns an
+        [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]
+        error code.
+        """
+        ENCODING_UNSPECIFIED = 0
+        LINEAR16 = 1
+        FLAC = 2
+        MULAW = 3
+        AMR = 4
+        AMR_WB = 5
+        OGG_OPUS = 6
+        SPEEX_WITH_HEADER_BYTE = 7
+
+    encoding = proto.Field(proto.ENUM, number=1, enum=AudioEncoding,)
+
+    sample_rate_hertz = proto.Field(proto.INT32, number=2)
+
+    audio_channel_count = proto.Field(proto.INT32, number=7)
+
+    enable_separate_recognition_per_channel = proto.Field(proto.BOOL, number=12)
+
+    language_code = proto.Field(proto.STRING, number=3)
+
+    max_alternatives = proto.Field(proto.INT32, number=4)
+
+    profanity_filter = proto.Field(proto.BOOL, number=5)
+
+    speech_contexts = proto.RepeatedField(
+        proto.MESSAGE, number=6, message="SpeechContext",
+    )
+
+    enable_word_time_offsets = proto.Field(proto.BOOL, number=8)
+
+    enable_automatic_punctuation = proto.Field(proto.BOOL, number=11)
+
+    diarization_config = proto.Field(
+        proto.MESSAGE, number=19, message="SpeakerDiarizationConfig",
+    )
+
+    metadata = proto.Field(proto.MESSAGE, number=9, message="RecognitionMetadata",)
+
+    model = proto.Field(proto.STRING, number=13)
+
+    use_enhanced = proto.Field(proto.BOOL, number=14)
+
+
+class SpeakerDiarizationConfig(proto.Message):
+    r"""Config to enable speaker diarization.
+
+    Attributes:
+        enable_speaker_diarization (bool):
+            If 'true', enables speaker detection for each recognized
+            word in the top alternative of the recognition result using
+            a speaker_tag provided in the WordInfo.
+        min_speaker_count (int):
+            Minimum number of speakers in the
+            conversation. This range gives you more
+            flexibility by allowing the system to
+            automatically determine the correct number of
+            speakers. If not set, the default value is 2.
+        max_speaker_count (int):
+            Maximum number of speakers in the
+            conversation. This range gives you more
+            flexibility by allowing the system to
+            automatically determine the correct number of
+            speakers. If not set, the default value is 6.
+        speaker_tag (int):
+            Unused.
+    """
+
+    enable_speaker_diarization = proto.Field(proto.BOOL, number=1)
+
+    min_speaker_count = proto.Field(proto.INT32, number=2)
+
+    max_speaker_count = proto.Field(proto.INT32, number=3)
+
+    speaker_tag = proto.Field(proto.INT32, number=5)
+
+
+class RecognitionMetadata(proto.Message):
+    r"""Description of audio data to be recognized.
+
+    Attributes:
+        interaction_type (~.cloud_speech.RecognitionMetadata.InteractionType):
+            The use case most closely describing the
+            audio content to be recognized.
+        industry_naics_code_of_audio (int):
+            The industry vertical to which this speech
+            recognition request most closely applies. This
+            is most indicative of the topics contained in
+            the audio. Use the 6-digit NAICS code to
+            identify the industry vertical - see
+            https://www.naics.com/search/.
+        microphone_distance (~.cloud_speech.RecognitionMetadata.MicrophoneDistance):
+            The audio type that most closely describes
+            the audio being recognized.
+        original_media_type (~.cloud_speech.RecognitionMetadata.OriginalMediaType):
+            The original media the speech was recorded
+            on.
+        recording_device_type (~.cloud_speech.RecognitionMetadata.RecordingDeviceType):
+            The type of device the speech was recorded
+            with.
+        recording_device_name (str):
+            The device used to make the recording.
+            Examples: 'Nexus 5X' or 'Polycom SoundStation IP
+            6000' or 'POTS' or 'VoIP' or 'Cardioid
+            Microphone'.
+        original_mime_type (str):
+            Mime type of the original audio file. For example
+            ``audio/m4a``, ``audio/x-alaw-basic``, ``audio/mp3``,
+            ``audio/3gpp``. A list of possible audio mime types is
+            maintained at
+            http://www.iana.org/assignments/media-types/media-types.xhtml#audio
+        audio_topic (str):
+            Description of the content. E.g. "Recordings
+            of federal supreme court hearings from 2012".
+    """
+
+    class InteractionType(proto.Enum):
+        r"""Use case categories that the audio recognition request can be
+        described by.
+        """
+        INTERACTION_TYPE_UNSPECIFIED = 0
+        DISCUSSION = 1
+        PRESENTATION = 2
+        PHONE_CALL = 3
+        VOICEMAIL = 4
+        PROFESSIONALLY_PRODUCED = 5
+        VOICE_SEARCH = 6
+        VOICE_COMMAND = 7
+        DICTATION = 8
+
+    class MicrophoneDistance(proto.Enum):
+        r"""Enumerates the types of capture settings describing an audio
+        file.
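A hedged sketch of attaching the metadata described above to a recognition request (the enum choices shown are placeholder values):

    from google.cloud import speech_v1

    metadata = speech_v1.RecognitionMetadata(
        interaction_type=speech_v1.RecognitionMetadata.InteractionType.PHONE_CALL,
        microphone_distance=speech_v1.RecognitionMetadata.MicrophoneDistance.NEARFIELD,
    )
    config = speech_v1.RecognitionConfig(language_code="en-US", metadata=metadata)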
+ """ + MICROPHONE_DISTANCE_UNSPECIFIED = 0 + NEARFIELD = 1 + MIDFIELD = 2 + FARFIELD = 3 + + class OriginalMediaType(proto.Enum): + r"""The original media the speech was recorded on.""" + ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0 + AUDIO = 1 + VIDEO = 2 + + class RecordingDeviceType(proto.Enum): + r"""The type of device the speech was recorded with.""" + RECORDING_DEVICE_TYPE_UNSPECIFIED = 0 + SMARTPHONE = 1 + PC = 2 + PHONE_LINE = 3 + VEHICLE = 4 + OTHER_OUTDOOR_DEVICE = 5 + OTHER_INDOOR_DEVICE = 6 + + interaction_type = proto.Field(proto.ENUM, number=1, enum=InteractionType,) + + industry_naics_code_of_audio = proto.Field(proto.UINT32, number=3) + + microphone_distance = proto.Field(proto.ENUM, number=4, enum=MicrophoneDistance,) + + original_media_type = proto.Field(proto.ENUM, number=5, enum=OriginalMediaType,) + + recording_device_type = proto.Field(proto.ENUM, number=6, enum=RecordingDeviceType,) + + recording_device_name = proto.Field(proto.STRING, number=7) + + original_mime_type = proto.Field(proto.STRING, number=8) + + audio_topic = proto.Field(proto.STRING, number=10) + + +class SpeechContext(proto.Message): + r"""Provides "hints" to the speech recognizer to favor specific + words and phrases in the results. + + Attributes: + phrases (Sequence[str]): + A list of strings containing words and phrases "hints" so + that the speech recognition is more likely to recognize + them. This can be used to improve the accuracy for specific + words and phrases, for example, if specific commands are + typically spoken by the user. This can also be used to add + additional words to the vocabulary of the recognizer. See + `usage + limits `__. + + List items can also be set to classes for groups of words + that represent common concepts that occur in natural + language. For example, rather than providing phrase hints + for every month of the year, using the $MONTH class improves + the likelihood of correctly transcribing audio that includes + months. + """ + + phrases = proto.RepeatedField(proto.STRING, number=1) + + +class RecognitionAudio(proto.Message): + r"""Contains audio data in the encoding specified in the + ``RecognitionConfig``. Either ``content`` or ``uri`` must be + supplied. Supplying both or neither returns + [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. + See `content + limits `__. + + Attributes: + content (bytes): + The audio data bytes encoded as specified in + ``RecognitionConfig``. Note: as with all bytes fields, proto + buffers use a pure binary representation, whereas JSON + representations use base64. + uri (str): + URI that points to a file that contains audio data bytes as + specified in ``RecognitionConfig``. The file must not be + compressed (for example, gzip). Currently, only Google Cloud + Storage URIs are supported, which must be specified in the + following format: ``gs://bucket_name/object_name`` (other + URI formats return + [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). + For more information, see `Request + URIs `__. + """ + + content = proto.Field(proto.BYTES, number=1, oneof="audio_source") + + uri = proto.Field(proto.STRING, number=2, oneof="audio_source") + + +class RecognizeResponse(proto.Message): + r"""The only message returned to the client by the ``Recognize`` method. + It contains the result as zero or more sequential + ``SpeechRecognitionResult`` messages. 
+
+    Attributes:
+        results (Sequence[~.cloud_speech.SpeechRecognitionResult]):
+            Sequential list of transcription results
+            corresponding to sequential portions of audio.
+    """
+
+    results = proto.RepeatedField(
+        proto.MESSAGE, number=2, message="SpeechRecognitionResult",
+    )
+
+
+class LongRunningRecognizeResponse(proto.Message):
+    r"""The only message returned to the client by the
+    ``LongRunningRecognize`` method. It contains the result as zero or
+    more sequential ``SpeechRecognitionResult`` messages. It is included
+    in the ``result.response`` field of the ``Operation`` returned by
+    the ``GetOperation`` call of the ``google::longrunning::Operations``
+    service.
+
+    Attributes:
+        results (Sequence[~.cloud_speech.SpeechRecognitionResult]):
+            Sequential list of transcription results
+            corresponding to sequential portions of audio.
+    """
+
+    results = proto.RepeatedField(
+        proto.MESSAGE, number=2, message="SpeechRecognitionResult",
+    )
+
+
+class LongRunningRecognizeMetadata(proto.Message):
+    r"""Describes the progress of a long-running ``LongRunningRecognize``
+    call. It is included in the ``metadata`` field of the ``Operation``
+    returned by the ``GetOperation`` call of the
+    ``google::longrunning::Operations`` service.
+
+    Attributes:
+        progress_percent (int):
+            Approximate percentage of audio processed
+            thus far. Guaranteed to be 100 when the audio is
+            fully processed and the results are available.
+        start_time (~.timestamp.Timestamp):
+            Time when the request was received.
+        last_update_time (~.timestamp.Timestamp):
+            Time of the most recent processing update.
+    """
+
+    progress_percent = proto.Field(proto.INT32, number=1)
+
+    start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,)
+
+    last_update_time = proto.Field(
+        proto.MESSAGE, number=3, message=timestamp.Timestamp,
+    )
+
+
+class StreamingRecognizeResponse(proto.Message):
+    r"""``StreamingRecognizeResponse`` is the only message returned to the
+    client by ``StreamingRecognize``. A series of zero or more
+    ``StreamingRecognizeResponse`` messages are streamed back to the
+    client. If there is no recognizable audio, and ``single_utterance``
+    is set to false, then no messages are streamed back to the client.
+
+    Here's an example of a series of seven
+    ``StreamingRecognizeResponse``\ s that might be returned while
+    processing audio:
+
+    1. results { alternatives { transcript: "tube" } stability: 0.01 }
+
+    2. results { alternatives { transcript: "to be a" } stability: 0.01
+       }
+
+    3. results { alternatives { transcript: "to be" } stability: 0.9 }
+       results { alternatives { transcript: " or not to be" } stability:
+       0.01 }
+
+    4. results { alternatives { transcript: "to be or not to be"
+       confidence: 0.92 } alternatives { transcript: "to bee or not to
+       bee" } is_final: true }
+
+    5. results { alternatives { transcript: " that's" } stability: 0.01
+       }
+
+    6. results { alternatives { transcript: " that is" } stability: 0.9
+       } results { alternatives { transcript: " the question" }
+       stability: 0.01 }
+
+    7. results { alternatives { transcript: " that is the question"
+       confidence: 0.98 } alternatives { transcript: " that was the
+       question" } is_final: true }
+
+    Notes:
+
+    - Only two of the above responses, #4 and #7, contain final results;
+      they are indicated by ``is_final: true``. Concatenating these
+      together generates the full transcript: "to be or not to be that
+      is the question".
+
+    - The others contain interim ``results``.
#3 and #6 contain two + interim ``results``: the first portion has a high stability and + is less likely to change; the second portion has a low stability + and is very likely to change. A UI designer might choose to show + only high stability ``results``. + + - The specific ``stability`` and ``confidence`` values shown above + are only for illustrative purposes. Actual values may vary. + + - In each response, only one of these fields will be set: + ``error``, ``speech_event_type``, or one or more (repeated) + ``results``. + + Attributes: + error (~.status.Status): + If set, returns a [google.rpc.Status][google.rpc.Status] + message that specifies the error for the operation. + results (Sequence[~.cloud_speech.StreamingRecognitionResult]): + This repeated list contains zero or more results that + correspond to consecutive portions of the audio currently + being processed. It contains zero or one ``is_final=true`` + result (the newly settled portion), followed by zero or more + ``is_final=false`` results (the interim results). + speech_event_type (~.cloud_speech.StreamingRecognizeResponse.SpeechEventType): + Indicates the type of speech event. + """ + + class SpeechEventType(proto.Enum): + r"""Indicates the type of speech event.""" + SPEECH_EVENT_UNSPECIFIED = 0 + END_OF_SINGLE_UTTERANCE = 1 + + error = proto.Field(proto.MESSAGE, number=1, message=status.Status,) + + results = proto.RepeatedField( + proto.MESSAGE, number=2, message="StreamingRecognitionResult", + ) + + speech_event_type = proto.Field(proto.ENUM, number=4, enum=SpeechEventType,) + + +class StreamingRecognitionResult(proto.Message): + r"""A streaming speech recognition result corresponding to a + portion of the audio that is currently being processed. + + Attributes: + alternatives (Sequence[~.cloud_speech.SpeechRecognitionAlternative]): + May contain one or more recognition hypotheses (up to the + maximum specified in ``max_alternatives``). These + alternatives are ordered in terms of accuracy, with the top + (first) alternative being the most probable, as ranked by + the recognizer. + is_final (bool): + If ``false``, this ``StreamingRecognitionResult`` represents + an interim result that may change. If ``true``, this is the + final time the speech service will return this particular + ``StreamingRecognitionResult``, the recognizer will not + return any further hypotheses for this portion of the + transcript and corresponding audio. + stability (float): + An estimate of the likelihood that the recognizer will not + change its guess about this interim result. Values range + from 0.0 (completely unstable) to 1.0 (completely stable). + This field is only provided for interim results + (``is_final=false``). The default of 0.0 is a sentinel value + indicating ``stability`` was not set. + result_end_time (~.duration.Duration): + Time offset of the end of this result + relative to the beginning of the audio. + channel_tag (int): + For multi-channel audio, this is the channel number + corresponding to the recognized result for the audio from + that channel. For audio_channel_count = N, its output values + can range from '1' to 'N'. + language_code (str): + The + `BCP-47 `__ + language tag of the language in this result. This language + code was detected to have the most likelihood of being + spoken in the audio. 
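A hedged sketch of consuming these streaming results and keeping only settled text (``print_final_transcripts`` is an illustrative helper name; ``responses`` stands in for the iterator returned by a ``streaming_recognize`` call, such as one fed by the request generator sketched earlier):

    def print_final_transcripts(responses):
        for response in responses:
            for result in response.results:
                if result.is_final:
                    # Interim hypotheses arrive with is_final == False.
                    print(result.alternatives[0].transcript)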
+ """ + + alternatives = proto.RepeatedField( + proto.MESSAGE, number=1, message="SpeechRecognitionAlternative", + ) + + is_final = proto.Field(proto.BOOL, number=2) + + stability = proto.Field(proto.FLOAT, number=3) + + result_end_time = proto.Field(proto.MESSAGE, number=4, message=duration.Duration,) + + channel_tag = proto.Field(proto.INT32, number=5) + + language_code = proto.Field(proto.STRING, number=6) + + +class SpeechRecognitionResult(proto.Message): + r"""A speech recognition result corresponding to a portion of the + audio. + + Attributes: + alternatives (Sequence[~.cloud_speech.SpeechRecognitionAlternative]): + May contain one or more recognition hypotheses (up to the + maximum specified in ``max_alternatives``). These + alternatives are ordered in terms of accuracy, with the top + (first) alternative being the most probable, as ranked by + the recognizer. + channel_tag (int): + For multi-channel audio, this is the channel number + corresponding to the recognized result for the audio from + that channel. For audio_channel_count = N, its output values + can range from '1' to 'N'. + """ + + alternatives = proto.RepeatedField( + proto.MESSAGE, number=1, message="SpeechRecognitionAlternative", + ) + + channel_tag = proto.Field(proto.INT32, number=2) + + +class SpeechRecognitionAlternative(proto.Message): + r"""Alternative hypotheses (a.k.a. n-best list). + + Attributes: + transcript (str): + Transcript text representing the words that + the user spoke. + confidence (float): + The confidence estimate between 0.0 and 1.0. A higher number + indicates an estimated greater likelihood that the + recognized words are correct. This field is set only for the + top alternative of a non-streaming result or, of a streaming + result where ``is_final=true``. This field is not guaranteed + to be accurate and users should not rely on it to be always + provided. The default of 0.0 is a sentinel value indicating + ``confidence`` was not set. + words (Sequence[~.cloud_speech.WordInfo]): + A list of word-specific information for each recognized + word. Note: When ``enable_speaker_diarization`` is true, you + will see all the words from the beginning of the audio. + """ + + transcript = proto.Field(proto.STRING, number=1) + + confidence = proto.Field(proto.FLOAT, number=2) + + words = proto.RepeatedField(proto.MESSAGE, number=3, message="WordInfo",) + + +class WordInfo(proto.Message): + r"""Word-specific information for recognized words. + + Attributes: + start_time (~.duration.Duration): + Time offset relative to the beginning of the audio, and + corresponding to the start of the spoken word. This field is + only set if ``enable_word_time_offsets=true`` and only in + the top hypothesis. This is an experimental feature and the + accuracy of the time offset can vary. + end_time (~.duration.Duration): + Time offset relative to the beginning of the audio, and + corresponding to the end of the spoken word. This field is + only set if ``enable_word_time_offsets=true`` and only in + the top hypothesis. This is an experimental feature and the + accuracy of the time offset can vary. + word (str): + The word corresponding to this set of + information. + speaker_tag (int): + A distinct integer value is assigned for every speaker + within the audio. This field specifies which one of those + speakers was detected to have spoken this word. Value ranges + from '1' to diarization_speaker_count. speaker_tag is set if + enable_speaker_diarization = 'true' and only in the top + alternative. 
+ """ + + start_time = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,) + + end_time = proto.Field(proto.MESSAGE, number=2, message=duration.Duration,) + + word = proto.Field(proto.STRING, number=3) + + speaker_tag = proto.Field(proto.INT32, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/__init__.py index b68a395372f9..6424649f7c39 100644 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/__init__.py +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/__init__.py @@ -1,30 +1,70 @@ -# Copyright 2018 Google LLC +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -from __future__ import absolute_import +from .services.speech import SpeechClient +from .types.cloud_speech import LongRunningRecognizeMetadata +from .types.cloud_speech import LongRunningRecognizeRequest +from .types.cloud_speech import LongRunningRecognizeResponse +from .types.cloud_speech import RecognitionAudio +from .types.cloud_speech import RecognitionConfig +from .types.cloud_speech import RecognitionMetadata +from .types.cloud_speech import RecognizeRequest +from .types.cloud_speech import RecognizeResponse +from .types.cloud_speech import SpeakerDiarizationConfig +from .types.cloud_speech import SpeechContext +from .types.cloud_speech import SpeechRecognitionAlternative +from .types.cloud_speech import SpeechRecognitionResult +from .types.cloud_speech import StreamingRecognitionConfig +from .types.cloud_speech import StreamingRecognitionResult +from .types.cloud_speech import StreamingRecognizeRequest +from .types.cloud_speech import StreamingRecognizeResponse +from .types.cloud_speech import WordInfo +from .types.resource import CustomClass +from .types.resource import PhraseSet +from .types.resource import SpeechAdaptation -from google.cloud.speech_v1p1beta1 import types -from google.cloud.speech_v1p1beta1.gapic import enums -from google.cloud.speech_v1p1beta1.gapic import speech_client from google.cloud.speech_v1.helpers import SpeechHelpers -class SpeechClient(SpeechHelpers, speech_client.SpeechClient): - __doc__ = speech_client.SpeechClient.__doc__ - enums = enums - types = types +class SpeechClient(SpeechHelpers, SpeechClient): + __doc__ = SpeechClient.__doc__ -__all__ = ("enums", "types", "SpeechClient") +__all__ = ( + "CustomClass", + "LongRunningRecognizeMetadata", + "LongRunningRecognizeRequest", + "LongRunningRecognizeResponse", + "PhraseSet", + "RecognitionAudio", + "RecognitionConfig", + "RecognitionMetadata", + "RecognizeRequest", + "RecognizeResponse", + "SpeakerDiarizationConfig", + "SpeechAdaptation", + "SpeechContext", + "SpeechRecognitionAlternative", + "SpeechRecognitionResult", + "StreamingRecognitionConfig", + "StreamingRecognitionResult", + "StreamingRecognizeRequest", + 
"StreamingRecognizeResponse", + "WordInfo", + "SpeechClient", +) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/enums.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/enums.py deleted file mode 100644 index b17772ec15ef..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/enums.py +++ /dev/null @@ -1,201 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class RecognitionConfig(object): - class AudioEncoding(enum.IntEnum): - """ - The encoding of the audio data sent in the request. - - All encodings support only 1 channel (mono) audio, unless the - ``audio_channel_count`` and ``enable_separate_recognition_per_channel`` - fields are set. - - For best results, the audio source should be captured and transmitted - using a lossless encoding (``FLAC`` or ``LINEAR16``). The accuracy of - the speech recognition can be reduced if lossy codecs are used to - capture or transmit audio, particularly if background noise is present. - Lossy codecs include ``MULAW``, ``AMR``, ``AMR_WB``, ``OGG_OPUS``, - ``SPEEX_WITH_HEADER_BYTE``, and ``MP3``. - - The ``FLAC`` and ``WAV`` audio file formats include a header that - describes the included audio content. You can request recognition for - ``WAV`` files that contain either ``LINEAR16`` or ``MULAW`` encoded - audio. If you send ``FLAC`` or ``WAV`` audio file format in your - request, you do not need to specify an ``AudioEncoding``; the audio - encoding format is determined from the file header. If you specify an - ``AudioEncoding`` when you send send ``FLAC`` or ``WAV`` audio, the - encoding configuration must match the encoding described in the audio - header; otherwise the request returns an - ``google.rpc.Code.INVALID_ARGUMENT`` error code. - - Attributes: - ENCODING_UNSPECIFIED (int): Not specified. - LINEAR16 (int): Uncompressed 16-bit signed little-endian samples (Linear PCM). - FLAC (int): ``FLAC`` (Free Lossless Audio Codec) is the recommended encoding - because it is lossless--therefore recognition is not compromised--and - requires only about half the bandwidth of ``LINEAR16``. ``FLAC`` stream - encoding supports 16-bit and 24-bit samples, however, not all fields in - ``STREAMINFO`` are supported. - MULAW (int): 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. - AMR (int): Adaptive Multi-Rate Narrowband codec. ``sample_rate_hertz`` must be - 8000. - AMR_WB (int): Adaptive Multi-Rate Wideband codec. ``sample_rate_hertz`` must be - 16000. - OGG_OPUS (int): Opus encoded audio frames in Ogg container - (`OggOpus `__). 
``sample_rate_hertz`` - must be one of 8000, 12000, 16000, 24000, or 48000. - SPEEX_WITH_HEADER_BYTE (int): Although the use of lossy encodings is not recommended, if a very - low bitrate encoding is required, ``OGG_OPUS`` is highly preferred over - Speex encoding. The `Speex `__ encoding supported by - Cloud Speech API has a header byte in each block, as in MIME type - ``audio/x-speex-with-header-byte``. It is a variant of the RTP Speex - encoding defined in `RFC 5574 `__. - The stream is a sequence of blocks, one block per RTP packet. Each block - starts with a byte containing the length of the block, in bytes, - followed by one or more frames of Speex data, padded to an integral - number of bytes (octets) as specified in RFC 5574. In other words, each - RTP header is replaced with a single byte containing the block length. - Only Speex wideband is supported. ``sample_rate_hertz`` must be 16000. - MP3 (int): MP3 audio. Support all standard MP3 bitrates (which range from - 32-320 kbps). When using this encoding, ``sample_rate_hertz`` has to - match the sample rate of the file being used. - """ - - ENCODING_UNSPECIFIED = 0 - LINEAR16 = 1 - FLAC = 2 - MULAW = 3 - AMR = 4 - AMR_WB = 5 - OGG_OPUS = 6 - SPEEX_WITH_HEADER_BYTE = 7 - MP3 = 8 - - -class RecognitionMetadata(object): - class InteractionType(enum.IntEnum): - """ - Use case categories that the audio recognition request can be described - by. - - Attributes: - INTERACTION_TYPE_UNSPECIFIED (int): Use case is either unknown or is something other than one of the other - values below. - DISCUSSION (int): Multiple people in a conversation or discussion. For example in a - meeting with two or more people actively participating. Typically all - the primary people speaking would be in the same room (if not, see - PHONE_CALL) - PRESENTATION (int): One or more persons lecturing or presenting to others, mostly - uninterrupted. - PHONE_CALL (int): A phone-call or video-conference in which two or more people, who are - not in the same room, are actively participating. - VOICEMAIL (int): A recorded message intended for another person to listen to. - PROFESSIONALLY_PRODUCED (int): Professionally produced audio (eg. TV Show, Podcast). - VOICE_SEARCH (int): Transcribe spoken questions and queries into text. - VOICE_COMMAND (int): Transcribe voice commands, such as for controlling a device. - DICTATION (int): Transcribe speech to text to create a written document, such as a - text-message, email or report. - """ - - INTERACTION_TYPE_UNSPECIFIED = 0 - DISCUSSION = 1 - PRESENTATION = 2 - PHONE_CALL = 3 - VOICEMAIL = 4 - PROFESSIONALLY_PRODUCED = 5 - VOICE_SEARCH = 6 - VOICE_COMMAND = 7 - DICTATION = 8 - - class MicrophoneDistance(enum.IntEnum): - """ - Enumerates the types of capture settings describing an audio file. - - Attributes: - MICROPHONE_DISTANCE_UNSPECIFIED (int): Audio type is not known. - NEARFIELD (int): The audio was captured from a closely placed microphone. Eg. phone, - dictaphone, or handheld microphone. Generally if there speaker is within - 1 meter of the microphone. - MIDFIELD (int): The speaker if within 3 meters of the microphone. - FARFIELD (int): The speaker is more than 3 meters away from the microphone. - """ - - MICROPHONE_DISTANCE_UNSPECIFIED = 0 - NEARFIELD = 1 - MIDFIELD = 2 - FARFIELD = 3 - - class OriginalMediaType(enum.IntEnum): - """ - The original media the speech was recorded on. - - Attributes: - ORIGINAL_MEDIA_TYPE_UNSPECIFIED (int): Unknown original media type. - AUDIO (int): The speech data is an audio recording. 
- VIDEO (int): The speech data originally recorded on a video. - """ - - ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0 - AUDIO = 1 - VIDEO = 2 - - class RecordingDeviceType(enum.IntEnum): - """ - The type of device the speech was recorded with. - - Attributes: - RECORDING_DEVICE_TYPE_UNSPECIFIED (int): The recording device is unknown. - SMARTPHONE (int): Speech was recorded on a smartphone. - PC (int): Speech was recorded using a personal computer or tablet. - PHONE_LINE (int): Speech was recorded over a phone line. - VEHICLE (int): Speech was recorded in a vehicle. - OTHER_OUTDOOR_DEVICE (int): Speech was recorded outdoors. - OTHER_INDOOR_DEVICE (int): Speech was recorded indoors. - """ - - RECORDING_DEVICE_TYPE_UNSPECIFIED = 0 - SMARTPHONE = 1 - PC = 2 - PHONE_LINE = 3 - VEHICLE = 4 - OTHER_OUTDOOR_DEVICE = 5 - OTHER_INDOOR_DEVICE = 6 - - -class StreamingRecognizeResponse(object): - class SpeechEventType(enum.IntEnum): - """ - Indicates the type of speech event. - - Attributes: - SPEECH_EVENT_UNSPECIFIED (int): No speech event specified. - END_OF_SINGLE_UTTERANCE (int): This event indicates that the server has detected the end of the - user's speech utterance and expects no additional speech. Therefore, the - server will not process additional audio (although it may subsequently - return additional results). The client should stop sending additional - audio data, half-close the gRPC connection, and wait for any additional - results until the server closes the gRPC connection. This event is only - sent if ``single_utterance`` was set to ``true``, and is not used - otherwise. - """ - - SPEECH_EVENT_UNSPECIFIED = 0 - END_OF_SINGLE_UTTERANCE = 1 diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/speech_client.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/speech_client.py deleted file mode 100644 index 1ee874bc8bb9..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/speech_client.py +++ /dev/null @@ -1,410 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.cloud.speech.v1p1beta1 Speech API.""" - -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.speech_v1p1beta1.gapic import enums -from google.cloud.speech_v1p1beta1.gapic import speech_client_config -from google.cloud.speech_v1p1beta1.gapic.transports import speech_grpc_transport -from google.cloud.speech_v1p1beta1.proto import cloud_speech_pb2 -from google.cloud.speech_v1p1beta1.proto import cloud_speech_pb2_grpc -from google.longrunning import operations_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-speech",).version - - -class SpeechClient(object): - """Service that implements Google Cloud Speech API.""" - - SERVICE_ADDRESS = "speech.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.speech.v1p1beta1.Speech" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpeechClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.SpeechGrpcTransport, - Callable[[~.Credentials, type], ~.SpeechGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = speech_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=speech_grpc_transport.SpeechGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = speech_grpc_transport.SpeechGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def recognize( - self, - config, - audio, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Performs synchronous speech recognition: receive results after all audio - has been sent and processed. - - Example: - >>> from google.cloud import speech_v1p1beta1 - >>> from google.cloud.speech_v1p1beta1 import enums - >>> - >>> client = speech_v1p1beta1.SpeechClient() - >>> - >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC - >>> sample_rate_hertz = 44100 - >>> language_code = 'en-US' - >>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code} - >>> uri = 'gs://bucket_name/file_name.flac' - >>> audio = {'uri': uri} - >>> - >>> response = client.recognize(config, audio) - - Args: - config (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionConfig]): Required. Provides information to the recognizer that specifies how to - process the request. 
-
-                If a dict is provided, it must be of the same form as the protobuf
-                message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionConfig`
-            audio (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionAudio]): Required. The audio data to be recognized.
-
-                If a dict is provided, it must be of the same form as the protobuf
-                message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionAudio`
-            retry (Optional[google.api_core.retry.Retry]): A retry object used
-                to retry requests. If ``None`` is specified, requests will
-                be retried using a default configuration.
-            timeout (Optional[float]): The amount of time, in seconds, to wait
-                for the request to complete. Note that if ``retry`` is
-                specified, the timeout applies to each individual attempt.
-            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
-                that is provided to the method.
-
-        Returns:
-            A :class:`~google.cloud.speech_v1p1beta1.types.RecognizeResponse` instance.
-
-        Raises:
-            google.api_core.exceptions.GoogleAPICallError: If the request
-                failed for any reason.
-            google.api_core.exceptions.RetryError: If the request failed due
-                to a retryable error and retry attempts failed.
-            ValueError: If the parameters are invalid.
-        """
-        # Wrap the transport method to add retry and timeout logic.
-        if "recognize" not in self._inner_api_calls:
-            self._inner_api_calls[
-                "recognize"
-            ] = google.api_core.gapic_v1.method.wrap_method(
-                self.transport.recognize,
-                default_retry=self._method_configs["Recognize"].retry,
-                default_timeout=self._method_configs["Recognize"].timeout,
-                client_info=self._client_info,
-            )
-
-        request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio,)
-        return self._inner_api_calls["recognize"](
-            request, retry=retry, timeout=timeout, metadata=metadata
-        )
-
-    def long_running_recognize(
-        self,
-        config,
-        audio,
-        retry=google.api_core.gapic_v1.method.DEFAULT,
-        timeout=google.api_core.gapic_v1.method.DEFAULT,
-        metadata=None,
-    ):
-        """
-        Performs asynchronous speech recognition: receive results via the
-        google.longrunning.Operations interface. Returns either an
-        ``Operation.error`` or an ``Operation.response`` which contains a
-        ``LongRunningRecognizeResponse`` message. For more information on
-        asynchronous speech recognition, see the
-        `how-to <https://cloud.google.com/speech-to-text/docs/async-recognize>`__.
-
-        Example:
-            >>> from google.cloud import speech_v1p1beta1
-            >>> from google.cloud.speech_v1p1beta1 import enums
-            >>>
-            >>> client = speech_v1p1beta1.SpeechClient()
-            >>>
-            >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
-            >>> sample_rate_hertz = 44100
-            >>> language_code = 'en-US'
-            >>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
-            >>> uri = 'gs://bucket_name/file_name.flac'
-            >>> audio = {'uri': uri}
-            >>>
-            >>> response = client.long_running_recognize(config, audio)
-            >>>
-            >>> def callback(operation_future):
-            ...     # Handle result.
-            ...     result = operation_future.result()
-            >>>
-            >>> response.add_done_callback(callback)
-            >>>
-            >>> # Handle metadata.
-            >>> metadata = response.metadata()
-
-        Args:
-            config (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionConfig]): Required. Provides information to the recognizer that specifies how to
-                process the request.
-
-                If a dict is provided, it must be of the same form as the protobuf
-                message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionConfig`
-            audio (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionAudio]): Required. The audio data to be recognized.
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionAudio` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.speech_v1p1beta1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "long_running_recognize" not in self._inner_api_calls: - self._inner_api_calls[ - "long_running_recognize" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.long_running_recognize, - default_retry=self._method_configs["LongRunningRecognize"].retry, - default_timeout=self._method_configs["LongRunningRecognize"].timeout, - client_info=self._client_info, - ) - - request = cloud_speech_pb2.LongRunningRecognizeRequest( - config=config, audio=audio, - ) - operation = self._inner_api_calls["long_running_recognize"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - cloud_speech_pb2.LongRunningRecognizeResponse, - metadata_type=cloud_speech_pb2.LongRunningRecognizeMetadata, - ) - - def streaming_recognize( - self, - requests, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Performs bidirectional streaming speech recognition: receive results while - sending audio. This method is only available via the gRPC API (not REST). - - Example: - >>> from google.cloud import speech_v1p1beta1 - >>> - >>> client = speech_v1p1beta1.SpeechClient() - >>> - >>> request = {} - >>> - >>> requests = [request] - >>> for element in client.streaming_recognize(requests): - ... # process element - ... pass - - Args: - requests (iterator[dict|google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2.StreamingRecognizeRequest]): The input objects. If a dict is provided, it must be of the - same form as the protobuf message :class:`~google.cloud.speech_v1p1beta1.types.StreamingRecognizeRequest` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.speech_v1p1beta1.types.StreamingRecognizeResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. 
- """ - # Wrap the transport method to add retry and timeout logic. - if "streaming_recognize" not in self._inner_api_calls: - self._inner_api_calls[ - "streaming_recognize" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.streaming_recognize, - default_retry=self._method_configs["StreamingRecognize"].retry, - default_timeout=self._method_configs["StreamingRecognize"].timeout, - client_info=self._client_info, - ) - - return self._inner_api_calls["streaming_recognize"]( - requests, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/speech_client_config.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/speech_client_config.py deleted file mode 100644 index 722aa5653899..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/speech_client_config.py +++ /dev/null @@ -1,57 +0,0 @@ -config = { - "interfaces": { - "google.cloud.speech.v1p1beta1.Speech": { - "retry_codes": { - "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "no_retry_codes": [], - "no_retry_1_codes": [], - }, - "retry_params": { - "retry_policy_1_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 5000000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 5000000, - "total_timeout_millis": 5000000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_1_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 5000000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 5000000, - "total_timeout_millis": 5000000, - }, - }, - "methods": { - "Recognize": { - "timeout_millis": 5000000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "LongRunningRecognize": { - "timeout_millis": 5000000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "StreamingRecognize": { - "timeout_millis": 5000000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - }, - } - } -} diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/transports/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/transports/speech_grpc_transport.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/transports/speech_grpc_transport.py deleted file mode 100644 index 382f53c1aa54..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/gapic/transports/speech_grpc_transport.py +++ /dev/null @@ -1,162 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.speech_v1p1beta1.proto import cloud_speech_pb2_grpc - - -class SpeechGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.speech.v1p1beta1 Speech API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="speech.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "speech_stub": cloud_speech_pb2_grpc.SpeechStub(channel), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="speech.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. 
- """ - return self._channel - - @property - def recognize(self): - """Return the gRPC stub for :meth:`SpeechClient.recognize`. - - Performs synchronous speech recognition: receive results after all audio - has been sent and processed. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["speech_stub"].Recognize - - @property - def long_running_recognize(self): - """Return the gRPC stub for :meth:`SpeechClient.long_running_recognize`. - - Performs asynchronous speech recognition: receive results via the - google.longrunning.Operations interface. Returns either an - ``Operation.error`` or an ``Operation.response`` which contains a - ``LongRunningRecognizeResponse`` message. For more information on - asynchronous speech recognition, see the - `how-to `__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["speech_stub"].LongRunningRecognize - - @property - def streaming_recognize(self): - """Return the gRPC stub for :meth:`SpeechClient.streaming_recognize`. - - Performs bidirectional streaming speech recognition: receive results while - sending audio. This method is only available via the gRPC API (not REST). - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["speech_stub"].StreamingRecognize diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/cloud_speech_pb2.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/cloud_speech_pb2.py deleted file mode 100644 index c79941862be7..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/cloud_speech_pb2.py +++ /dev/null @@ -1,3083 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/speech_v1p1beta1/proto/cloud_speech.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.speech_v1p1beta1.proto import ( - resource_pb2 as google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_resource__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/speech_v1p1beta1/proto/cloud_speech.proto", - package="google.cloud.speech.v1p1beta1", - syntax="proto3", - serialized_options=b"\n!com.google.cloud.speech.v1p1beta1B\013SpeechProtoP\001ZCgoogle.golang.org/genproto/googleapis/cloud/speech/v1p1beta1;speech\370\001\001\242\002\003GCS", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n6google/cloud/speech_v1p1beta1/proto/cloud_speech.proto\x12\x1dgoogle.cloud.speech.v1p1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x32google/cloud/speech_v1p1beta1/proto/resource.proto\x1a#google/longrunning/operations.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x9e\x01\n\x10RecognizeRequest\x12\x45\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x30.google.cloud.speech.v1p1beta1.RecognitionConfigB\x03\xe0\x41\x02\x12\x43\n\x05\x61udio\x18\x02 \x01(\x0b\x32/.google.cloud.speech.v1p1beta1.RecognitionAudioB\x03\xe0\x41\x02"\xa9\x01\n\x1bLongRunningRecognizeRequest\x12\x45\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x30.google.cloud.speech.v1p1beta1.RecognitionConfigB\x03\xe0\x41\x02\x12\x43\n\x05\x61udio\x18\x02 \x01(\x0b\x32/.google.cloud.speech.v1p1beta1.RecognitionAudioB\x03\xe0\x41\x02"\xa0\x01\n\x19StreamingRecognizeRequest\x12U\n\x10streaming_config\x18\x01 \x01(\x0b\x32\x39.google.cloud.speech.v1p1beta1.StreamingRecognitionConfigH\x00\x12\x17\n\raudio_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request"\x96\x01\n\x1aStreamingRecognitionConfig\x12\x45\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x30.google.cloud.speech.v1p1beta1.RecognitionConfigB\x03\xe0\x41\x02\x12\x18\n\x10single_utterance\x18\x02 \x01(\x08\x12\x17\n\x0finterim_results\x18\x03 \x01(\x08"\xdc\x07\n\x11RecognitionConfig\x12P\n\x08\x65ncoding\x18\x01 \x01(\x0e\x32>.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding\x12\x19\n\x11sample_rate_hertz\x18\x02 \x01(\x05\x12\x1b\n\x13\x61udio_channel_count\x18\x07 \x01(\x05\x12/\n\'enable_separate_recognition_per_channel\x18\x0c \x01(\x08\x12\x1a\n\rlanguage_code\x18\x03 
\x01(\tB\x03\xe0\x41\x02\x12"\n\x1a\x61lternative_language_codes\x18\x12 \x03(\t\x12\x18\n\x10max_alternatives\x18\x04 \x01(\x05\x12\x18\n\x10profanity_filter\x18\x05 \x01(\x08\x12\x43\n\nadaptation\x18\x14 \x01(\x0b\x32/.google.cloud.speech.v1p1beta1.SpeechAdaptation\x12\x45\n\x0fspeech_contexts\x18\x06 \x03(\x0b\x32,.google.cloud.speech.v1p1beta1.SpeechContext\x12 \n\x18\x65nable_word_time_offsets\x18\x08 \x01(\x08\x12\x1e\n\x16\x65nable_word_confidence\x18\x0f \x01(\x08\x12$\n\x1c\x65nable_automatic_punctuation\x18\x0b \x01(\x08\x12&\n\x1a\x65nable_speaker_diarization\x18\x10 \x01(\x08\x42\x02\x18\x01\x12%\n\x19\x64iarization_speaker_count\x18\x11 \x01(\x05\x42\x02\x18\x01\x12S\n\x12\x64iarization_config\x18\x13 \x01(\x0b\x32\x37.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig\x12\x44\n\x08metadata\x18\t \x01(\x0b\x32\x32.google.cloud.speech.v1p1beta1.RecognitionMetadata\x12\r\n\x05model\x18\r \x01(\t\x12\x14\n\x0cuse_enhanced\x18\x0e \x01(\x08"\x94\x01\n\rAudioEncoding\x12\x18\n\x14\x45NCODING_UNSPECIFIED\x10\x00\x12\x0c\n\x08LINEAR16\x10\x01\x12\x08\n\x04\x46LAC\x10\x02\x12\t\n\x05MULAW\x10\x03\x12\x07\n\x03\x41MR\x10\x04\x12\n\n\x06\x41MR_WB\x10\x05\x12\x0c\n\x08OGG_OPUS\x10\x06\x12\x1a\n\x16SPEEX_WITH_HEADER_BYTE\x10\x07\x12\x07\n\x03MP3\x10\x08"\x90\x01\n\x18SpeakerDiarizationConfig\x12"\n\x1a\x65nable_speaker_diarization\x18\x01 \x01(\x08\x12\x19\n\x11min_speaker_count\x18\x02 \x01(\x05\x12\x19\n\x11max_speaker_count\x18\x03 \x01(\x05\x12\x1a\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x05\x18\x01\xe0\x41\x03"\xd7\x08\n\x13RecognitionMetadata\x12\\\n\x10interaction_type\x18\x01 \x01(\x0e\x32\x42.google.cloud.speech.v1p1beta1.RecognitionMetadata.InteractionType\x12$\n\x1cindustry_naics_code_of_audio\x18\x03 \x01(\r\x12\x62\n\x13microphone_distance\x18\x04 \x01(\x0e\x32\x45.google.cloud.speech.v1p1beta1.RecognitionMetadata.MicrophoneDistance\x12\x61\n\x13original_media_type\x18\x05 \x01(\x0e\x32\x44.google.cloud.speech.v1p1beta1.RecognitionMetadata.OriginalMediaType\x12\x65\n\x15recording_device_type\x18\x06 \x01(\x0e\x32\x46.google.cloud.speech.v1p1beta1.RecognitionMetadata.RecordingDeviceType\x12\x1d\n\x15recording_device_name\x18\x07 \x01(\t\x12\x1a\n\x12original_mime_type\x18\x08 \x01(\t\x12\x19\n\robfuscated_id\x18\t \x01(\x03\x42\x02\x18\x01\x12\x13\n\x0b\x61udio_topic\x18\n \x01(\t"\xc5\x01\n\x0fInteractionType\x12 \n\x1cINTERACTION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nDISCUSSION\x10\x01\x12\x10\n\x0cPRESENTATION\x10\x02\x12\x0e\n\nPHONE_CALL\x10\x03\x12\r\n\tVOICEMAIL\x10\x04\x12\x1b\n\x17PROFESSIONALLY_PRODUCED\x10\x05\x12\x10\n\x0cVOICE_SEARCH\x10\x06\x12\x11\n\rVOICE_COMMAND\x10\x07\x12\r\n\tDICTATION\x10\x08"d\n\x12MicrophoneDistance\x12#\n\x1fMICROPHONE_DISTANCE_UNSPECIFIED\x10\x00\x12\r\n\tNEARFIELD\x10\x01\x12\x0c\n\x08MIDFIELD\x10\x02\x12\x0c\n\x08\x46\x41RFIELD\x10\x03"N\n\x11OriginalMediaType\x12#\n\x1fORIGINAL_MEDIA_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05\x41UDIO\x10\x01\x12\t\n\x05VIDEO\x10\x02"\xa4\x01\n\x13RecordingDeviceType\x12%\n!RECORDING_DEVICE_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nSMARTPHONE\x10\x01\x12\x06\n\x02PC\x10\x02\x12\x0e\n\nPHONE_LINE\x10\x03\x12\x0b\n\x07VEHICLE\x10\x04\x12\x18\n\x14OTHER_OUTDOOR_DEVICE\x10\x05\x12\x17\n\x13OTHER_INDOOR_DEVICE\x10\x06"/\n\rSpeechContext\x12\x0f\n\x07phrases\x18\x01 \x03(\t\x12\r\n\x05\x62oost\x18\x04 \x01(\x02"D\n\x10RecognitionAudio\x12\x11\n\x07\x63ontent\x18\x01 \x01(\x0cH\x00\x12\r\n\x03uri\x18\x02 \x01(\tH\x00\x42\x0e\n\x0c\x61udio_source"\\\n\x11RecognizeResponse\x12G\n\x07results\x18\x02 
\x03(\x0b\x32\x36.google.cloud.speech.v1p1beta1.SpeechRecognitionResult"g\n\x1cLongRunningRecognizeResponse\x12G\n\x07results\x18\x02 \x03(\x0b\x32\x36.google.cloud.speech.v1p1beta1.SpeechRecognitionResult"\xb0\x01\n\x1cLongRunningRecognizeMetadata\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10last_update_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x10\n\x03uri\x18\x04 \x01(\tB\x03\xe0\x41\x03"\xbf\x02\n\x1aStreamingRecognizeResponse\x12!\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12J\n\x07results\x18\x02 \x03(\x0b\x32\x39.google.cloud.speech.v1p1beta1.StreamingRecognitionResult\x12\x64\n\x11speech_event_type\x18\x04 \x01(\x0e\x32I.google.cloud.speech.v1p1beta1.StreamingRecognizeResponse.SpeechEventType"L\n\x0fSpeechEventType\x12\x1c\n\x18SPEECH_EVENT_UNSPECIFIED\x10\x00\x12\x1b\n\x17\x45ND_OF_SINGLE_UTTERANCE\x10\x01"\xf9\x01\n\x1aStreamingRecognitionResult\x12Q\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32;.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative\x12\x10\n\x08is_final\x18\x02 \x01(\x08\x12\x11\n\tstability\x18\x03 \x01(\x02\x12\x32\n\x0fresult_end_time\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x63hannel_tag\x18\x05 \x01(\x05\x12\x1a\n\rlanguage_code\x18\x06 \x01(\tB\x03\xe0\x41\x03"\x9d\x01\n\x17SpeechRecognitionResult\x12Q\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32;.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative\x12\x13\n\x0b\x63hannel_tag\x18\x02 \x01(\x05\x12\x1a\n\rlanguage_code\x18\x05 \x01(\tB\x03\xe0\x41\x03"~\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x36\n\x05words\x18\x03 \x03(\x0b\x32\'.google.cloud.speech.v1p1beta1.WordInfo"\xa2\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03\x32\x82\x05\n\x06Speech\x12\xa5\x01\n\tRecognize\x12/.google.cloud.speech.v1p1beta1.RecognizeRequest\x1a\x30.google.cloud.speech.v1p1beta1.RecognizeResponse"5\x82\xd3\xe4\x93\x02 "\x1b/v1p1beta1/speech:recognize:\x01*\xda\x41\x0c\x63onfig,audio\x12\xf2\x01\n\x14LongRunningRecognize\x12:.google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest\x1a\x1d.google.longrunning.Operation"\x7f\x82\xd3\xe4\x93\x02+"&/v1p1beta1/speech:longrunningrecognize:\x01*\xda\x41\x0c\x63onfig,audio\xca\x41<\n\x1cLongRunningRecognizeResponse\x12\x1cLongRunningRecognizeMetadata\x12\x8f\x01\n\x12StreamingRecognize\x12\x38.google.cloud.speech.v1p1beta1.StreamingRecognizeRequest\x1a\x39.google.cloud.speech.v1p1beta1.StreamingRecognizeResponse"\x00(\x01\x30\x01\x1aI\xca\x41\x15speech.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x80\x01\n!com.google.cloud.speech.v1p1beta1B\x0bSpeechProtoP\x01ZCgoogle.golang.org/genproto/googleapis/cloud/speech/v1p1beta1;speech\xf8\x01\x01\xa2\x02\x03GCSb\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_resource__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_any__pb2.DESCRIPTOR, - 
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_rpc_dot_status__pb2.DESCRIPTOR, - ], -) - - -_RECOGNITIONCONFIG_AUDIOENCODING = _descriptor.EnumDescriptor( - name="AudioEncoding", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="ENCODING_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="LINEAR16", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FLAC", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="MULAW", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="AMR", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="AMR_WB", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="OGG_OPUS", - index=6, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SPEEX_WITH_HEADER_BYTE", - index=7, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="MP3", - index=8, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1900, - serialized_end=2048, -) -_sym_db.RegisterEnumDescriptor(_RECOGNITIONCONFIG_AUDIOENCODING) - -_RECOGNITIONMETADATA_INTERACTIONTYPE = _descriptor.EnumDescriptor( - name="InteractionType", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.InteractionType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="INTERACTION_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DISCUSSION", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PRESENTATION", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PHONE_CALL", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="VOICEMAIL", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PROFESSIONALLY_PRODUCED", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="VOICE_SEARCH", - index=6, - number=6, - 
serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="VOICE_COMMAND", - index=7, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DICTATION", - index=8, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2763, - serialized_end=2960, -) -_sym_db.RegisterEnumDescriptor(_RECOGNITIONMETADATA_INTERACTIONTYPE) - -_RECOGNITIONMETADATA_MICROPHONEDISTANCE = _descriptor.EnumDescriptor( - name="MicrophoneDistance", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.MicrophoneDistance", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="MICROPHONE_DISTANCE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NEARFIELD", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="MIDFIELD", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FARFIELD", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2962, - serialized_end=3062, -) -_sym_db.RegisterEnumDescriptor(_RECOGNITIONMETADATA_MICROPHONEDISTANCE) - -_RECOGNITIONMETADATA_ORIGINALMEDIATYPE = _descriptor.EnumDescriptor( - name="OriginalMediaType", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.OriginalMediaType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="ORIGINAL_MEDIA_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="AUDIO", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="VIDEO", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=3064, - serialized_end=3142, -) -_sym_db.RegisterEnumDescriptor(_RECOGNITIONMETADATA_ORIGINALMEDIATYPE) - -_RECOGNITIONMETADATA_RECORDINGDEVICETYPE = _descriptor.EnumDescriptor( - name="RecordingDeviceType", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.RecordingDeviceType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="RECORDING_DEVICE_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SMARTPHONE", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PC", - index=2, - number=2, - serialized_options=None, - type=None, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PHONE_LINE", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="VEHICLE", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="OTHER_OUTDOOR_DEVICE", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="OTHER_INDOOR_DEVICE", - index=6, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=3145, - serialized_end=3309, -) -_sym_db.RegisterEnumDescriptor(_RECOGNITIONMETADATA_RECORDINGDEVICETYPE) - -_STREAMINGRECOGNIZERESPONSE_SPEECHEVENTTYPE = _descriptor.EnumDescriptor( - name="SpeechEventType", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognizeResponse.SpeechEventType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="SPEECH_EVENT_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="END_OF_SINGLE_UTTERANCE", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=4052, - serialized_end=4128, -) -_sym_db.RegisterEnumDescriptor(_STREAMINGRECOGNIZERESPONSE_SPEECHEVENTTYPE) - - -_RECOGNIZEREQUEST = _descriptor.Descriptor( - name="RecognizeRequest", - full_name="google.cloud.speech.v1p1beta1.RecognizeRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="config", - full_name="google.cloud.speech.v1p1beta1.RecognizeRequest.config", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="audio", - full_name="google.cloud.speech.v1p1beta1.RecognizeRequest.audio", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=411, - serialized_end=569, -) - - -_LONGRUNNINGRECOGNIZEREQUEST = _descriptor.Descriptor( - name="LongRunningRecognizeRequest", - full_name="google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="config", - full_name="google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest.config", - index=0, - 
number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="audio", - full_name="google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest.audio", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=572, - serialized_end=741, -) - - -_STREAMINGRECOGNIZEREQUEST = _descriptor.Descriptor( - name="StreamingRecognizeRequest", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognizeRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="streaming_config", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognizeRequest.streaming_config", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="audio_content", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognizeRequest.audio_content", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="streaming_request", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognizeRequest.streaming_request", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=744, - serialized_end=904, -) - - -_STREAMINGRECOGNITIONCONFIG = _descriptor.Descriptor( - name="StreamingRecognitionConfig", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognitionConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="config", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognitionConfig.config", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="single_utterance", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognitionConfig.single_utterance", - index=1, - number=2, - type=8, - 
cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="interim_results", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognitionConfig.interim_results", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=907, - serialized_end=1057, -) - - -_RECOGNITIONCONFIG = _descriptor.Descriptor( - name="RecognitionConfig", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="encoding", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.encoding", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="sample_rate_hertz", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.sample_rate_hertz", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="audio_channel_count", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.audio_channel_count", - index=2, - number=7, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="enable_separate_recognition_per_channel", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.enable_separate_recognition_per_channel", - index=3, - number=12, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="language_code", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.language_code", - index=4, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - 
name="alternative_language_codes", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.alternative_language_codes", - index=5, - number=18, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="max_alternatives", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.max_alternatives", - index=6, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="profanity_filter", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.profanity_filter", - index=7, - number=5, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="adaptation", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.adaptation", - index=8, - number=20, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="speech_contexts", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.speech_contexts", - index=9, - number=6, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="enable_word_time_offsets", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.enable_word_time_offsets", - index=10, - number=8, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="enable_word_confidence", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.enable_word_confidence", - index=11, - number=15, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="enable_automatic_punctuation", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.enable_automatic_punctuation", - index=12, - number=11, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="enable_speaker_diarization", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.enable_speaker_diarization", - index=13, - number=16, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="diarization_speaker_count", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.diarization_speaker_count", - index=14, - number=17, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="diarization_config", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.diarization_config", - index=15, - number=19, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="metadata", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.metadata", - index=16, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="model", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.model", - index=17, - number=13, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="use_enhanced", - full_name="google.cloud.speech.v1p1beta1.RecognitionConfig.use_enhanced", - index=18, - number=14, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_RECOGNITIONCONFIG_AUDIOENCODING,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1060, - serialized_end=2048, -) - - -_SPEAKERDIARIZATIONCONFIG = _descriptor.Descriptor( - name="SpeakerDiarizationConfig", - full_name="google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="enable_speaker_diarization", - full_name="google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.enable_speaker_diarization", - index=0, - 
number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="min_speaker_count", - full_name="google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.min_speaker_count", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="max_speaker_count", - full_name="google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.max_speaker_count", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="speaker_tag", - full_name="google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.speaker_tag", - index=3, - number=5, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2051, - serialized_end=2195, -) - - -_RECOGNITIONMETADATA = _descriptor.Descriptor( - name="RecognitionMetadata", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="interaction_type", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.interaction_type", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="industry_naics_code_of_audio", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.industry_naics_code_of_audio", - index=1, - number=3, - type=13, - cpp_type=3, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="microphone_distance", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.microphone_distance", - index=2, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="original_media_type", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.original_media_type", - index=3, - number=5, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="recording_device_type", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.recording_device_type", - index=4, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="recording_device_name", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.recording_device_name", - index=5, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="original_mime_type", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.original_mime_type", - index=6, - number=8, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="obfuscated_id", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.obfuscated_id", - index=7, - number=9, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="audio_topic", - full_name="google.cloud.speech.v1p1beta1.RecognitionMetadata.audio_topic", - index=8, - number=10, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[ - _RECOGNITIONMETADATA_INTERACTIONTYPE, - _RECOGNITIONMETADATA_MICROPHONEDISTANCE, - _RECOGNITIONMETADATA_ORIGINALMEDIATYPE, - _RECOGNITIONMETADATA_RECORDINGDEVICETYPE, - ], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2198, - serialized_end=3309, -) - - -_SPEECHCONTEXT = _descriptor.Descriptor( - name="SpeechContext", - full_name="google.cloud.speech.v1p1beta1.SpeechContext", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="phrases", - full_name="google.cloud.speech.v1p1beta1.SpeechContext.phrases", - index=0, - number=1, 
- type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="boost", - full_name="google.cloud.speech.v1p1beta1.SpeechContext.boost", - index=1, - number=4, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3311, - serialized_end=3358, -) - - -_RECOGNITIONAUDIO = _descriptor.Descriptor( - name="RecognitionAudio", - full_name="google.cloud.speech.v1p1beta1.RecognitionAudio", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="content", - full_name="google.cloud.speech.v1p1beta1.RecognitionAudio.content", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="uri", - full_name="google.cloud.speech.v1p1beta1.RecognitionAudio.uri", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="audio_source", - full_name="google.cloud.speech.v1p1beta1.RecognitionAudio.audio_source", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=3360, - serialized_end=3428, -) - - -_RECOGNIZERESPONSE = _descriptor.Descriptor( - name="RecognizeResponse", - full_name="google.cloud.speech.v1p1beta1.RecognizeResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="results", - full_name="google.cloud.speech.v1p1beta1.RecognizeResponse.results", - index=0, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3430, - serialized_end=3522, -) - - -_LONGRUNNINGRECOGNIZERESPONSE = _descriptor.Descriptor( - name="LongRunningRecognizeResponse", - 
full_name="google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="results", - full_name="google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse.results", - index=0, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3524, - serialized_end=3627, -) - - -_LONGRUNNINGRECOGNIZEMETADATA = _descriptor.Descriptor( - name="LongRunningRecognizeMetadata", - full_name="google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="progress_percent", - full_name="google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata.progress_percent", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="last_update_time", - full_name="google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata.last_update_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="uri", - full_name="google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata.uri", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3630, - serialized_end=3806, -) - - -_STREAMINGRECOGNIZERESPONSE = _descriptor.Descriptor( - name="StreamingRecognizeResponse", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognizeResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="error", - 
full_name="google.cloud.speech.v1p1beta1.StreamingRecognizeResponse.error", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="results", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognizeResponse.results", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="speech_event_type", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognizeResponse.speech_event_type", - index=2, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_STREAMINGRECOGNIZERESPONSE_SPEECHEVENTTYPE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3809, - serialized_end=4128, -) - - -_STREAMINGRECOGNITIONRESULT = _descriptor.Descriptor( - name="StreamingRecognitionResult", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognitionResult", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="alternatives", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognitionResult.alternatives", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="is_final", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognitionResult.is_final", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="stability", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognitionResult.stability", - index=2, - number=3, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="result_end_time", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognitionResult.result_end_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="channel_tag", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognitionResult.channel_tag", - index=4, - number=5, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="language_code", - full_name="google.cloud.speech.v1p1beta1.StreamingRecognitionResult.language_code", - index=5, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4131, - serialized_end=4380, -) - - -_SPEECHRECOGNITIONRESULT = _descriptor.Descriptor( - name="SpeechRecognitionResult", - full_name="google.cloud.speech.v1p1beta1.SpeechRecognitionResult", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="alternatives", - full_name="google.cloud.speech.v1p1beta1.SpeechRecognitionResult.alternatives", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="channel_tag", - full_name="google.cloud.speech.v1p1beta1.SpeechRecognitionResult.channel_tag", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="language_code", - full_name="google.cloud.speech.v1p1beta1.SpeechRecognitionResult.language_code", - index=2, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4383, - serialized_end=4540, -) - - -_SPEECHRECOGNITIONALTERNATIVE = _descriptor.Descriptor( - name="SpeechRecognitionAlternative", - full_name="google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="transcript", - full_name="google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative.transcript", - index=0, - number=1, - 
type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="confidence", - full_name="google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative.confidence", - index=1, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="words", - full_name="google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative.words", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4542, - serialized_end=4668, -) - - -_WORDINFO = _descriptor.Descriptor( - name="WordInfo", - full_name="google.cloud.speech.v1p1beta1.WordInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.cloud.speech.v1p1beta1.WordInfo.start_time", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.cloud.speech.v1p1beta1.WordInfo.end_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="word", - full_name="google.cloud.speech.v1p1beta1.WordInfo.word", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="confidence", - full_name="google.cloud.speech.v1p1beta1.WordInfo.confidence", - index=3, - number=4, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="speaker_tag", - full_name="google.cloud.speech.v1p1beta1.WordInfo.speaker_tag", - index=4, - number=5, - type=5, - cpp_type=1, - label=1, 
- has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4671, - serialized_end=4833, -) - -_RECOGNIZEREQUEST.fields_by_name["config"].message_type = _RECOGNITIONCONFIG -_RECOGNIZEREQUEST.fields_by_name["audio"].message_type = _RECOGNITIONAUDIO -_LONGRUNNINGRECOGNIZEREQUEST.fields_by_name["config"].message_type = _RECOGNITIONCONFIG -_LONGRUNNINGRECOGNIZEREQUEST.fields_by_name["audio"].message_type = _RECOGNITIONAUDIO -_STREAMINGRECOGNIZEREQUEST.fields_by_name[ - "streaming_config" -].message_type = _STREAMINGRECOGNITIONCONFIG -_STREAMINGRECOGNIZEREQUEST.oneofs_by_name["streaming_request"].fields.append( - _STREAMINGRECOGNIZEREQUEST.fields_by_name["streaming_config"] -) -_STREAMINGRECOGNIZEREQUEST.fields_by_name[ - "streaming_config" -].containing_oneof = _STREAMINGRECOGNIZEREQUEST.oneofs_by_name["streaming_request"] -_STREAMINGRECOGNIZEREQUEST.oneofs_by_name["streaming_request"].fields.append( - _STREAMINGRECOGNIZEREQUEST.fields_by_name["audio_content"] -) -_STREAMINGRECOGNIZEREQUEST.fields_by_name[ - "audio_content" -].containing_oneof = _STREAMINGRECOGNIZEREQUEST.oneofs_by_name["streaming_request"] -_STREAMINGRECOGNITIONCONFIG.fields_by_name["config"].message_type = _RECOGNITIONCONFIG -_RECOGNITIONCONFIG.fields_by_name[ - "encoding" -].enum_type = _RECOGNITIONCONFIG_AUDIOENCODING -_RECOGNITIONCONFIG.fields_by_name[ - "adaptation" -].message_type = ( - google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_resource__pb2._SPEECHADAPTATION -) -_RECOGNITIONCONFIG.fields_by_name["speech_contexts"].message_type = _SPEECHCONTEXT -_RECOGNITIONCONFIG.fields_by_name[ - "diarization_config" -].message_type = _SPEAKERDIARIZATIONCONFIG -_RECOGNITIONCONFIG.fields_by_name["metadata"].message_type = _RECOGNITIONMETADATA -_RECOGNITIONCONFIG_AUDIOENCODING.containing_type = _RECOGNITIONCONFIG -_RECOGNITIONMETADATA.fields_by_name[ - "interaction_type" -].enum_type = _RECOGNITIONMETADATA_INTERACTIONTYPE -_RECOGNITIONMETADATA.fields_by_name[ - "microphone_distance" -].enum_type = _RECOGNITIONMETADATA_MICROPHONEDISTANCE -_RECOGNITIONMETADATA.fields_by_name[ - "original_media_type" -].enum_type = _RECOGNITIONMETADATA_ORIGINALMEDIATYPE -_RECOGNITIONMETADATA.fields_by_name[ - "recording_device_type" -].enum_type = _RECOGNITIONMETADATA_RECORDINGDEVICETYPE -_RECOGNITIONMETADATA_INTERACTIONTYPE.containing_type = _RECOGNITIONMETADATA -_RECOGNITIONMETADATA_MICROPHONEDISTANCE.containing_type = _RECOGNITIONMETADATA -_RECOGNITIONMETADATA_ORIGINALMEDIATYPE.containing_type = _RECOGNITIONMETADATA -_RECOGNITIONMETADATA_RECORDINGDEVICETYPE.containing_type = _RECOGNITIONMETADATA -_RECOGNITIONAUDIO.oneofs_by_name["audio_source"].fields.append( - _RECOGNITIONAUDIO.fields_by_name["content"] -) -_RECOGNITIONAUDIO.fields_by_name[ - "content" -].containing_oneof = _RECOGNITIONAUDIO.oneofs_by_name["audio_source"] -_RECOGNITIONAUDIO.oneofs_by_name["audio_source"].fields.append( - _RECOGNITIONAUDIO.fields_by_name["uri"] -) -_RECOGNITIONAUDIO.fields_by_name[ - "uri" -].containing_oneof = _RECOGNITIONAUDIO.oneofs_by_name["audio_source"] -_RECOGNIZERESPONSE.fields_by_name["results"].message_type = _SPEECHRECOGNITIONRESULT 
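This kind of cross-wiring is what gives the legacy ``cloud_speech_pb2`` messages their nested enums and mutually exclusive ``oneof`` fields. A minimal sketch, not part of the patch, of how that surfaces when building requests against the module this hunk removes; the ``gs://`` URI and byte string are illustrative placeholders:

    from google.cloud.speech_v1p1beta1.proto import cloud_speech_pb2

    # ``audio_source`` is a oneof: assigning ``content`` clears ``uri``.
    audio = cloud_speech_pb2.RecognitionAudio(uri="gs://bucket/audio.raw")
    audio.content = b"\x52\x49\x46\x46"
    assert audio.WhichOneof("audio_source") == "content"
    assert audio.uri == ""  # reset to the default when the oneof switched

    # Nested enum values and message types hang off RecognitionConfig,
    # exactly as the descriptor wiring above declares.
    config = cloud_speech_pb2.RecognitionConfig(
        encoding=cloud_speech_pb2.RecognitionConfig.LINEAR16,
        sample_rate_hertz=16000,
        language_code="en-US",
        diarization_config=cloud_speech_pb2.SpeakerDiarizationConfig(
            enable_speaker_diarization=True,
            min_speaker_count=2,
            max_speaker_count=6,
        ),
    )
    request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio)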
-_LONGRUNNINGRECOGNIZERESPONSE.fields_by_name[ - "results" -].message_type = _SPEECHRECOGNITIONRESULT -_LONGRUNNINGRECOGNIZEMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_LONGRUNNINGRECOGNIZEMETADATA.fields_by_name[ - "last_update_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_STREAMINGRECOGNIZERESPONSE.fields_by_name[ - "error" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_STREAMINGRECOGNIZERESPONSE.fields_by_name[ - "results" -].message_type = _STREAMINGRECOGNITIONRESULT -_STREAMINGRECOGNIZERESPONSE.fields_by_name[ - "speech_event_type" -].enum_type = _STREAMINGRECOGNIZERESPONSE_SPEECHEVENTTYPE -_STREAMINGRECOGNIZERESPONSE_SPEECHEVENTTYPE.containing_type = ( - _STREAMINGRECOGNIZERESPONSE -) -_STREAMINGRECOGNITIONRESULT.fields_by_name[ - "alternatives" -].message_type = _SPEECHRECOGNITIONALTERNATIVE -_STREAMINGRECOGNITIONRESULT.fields_by_name[ - "result_end_time" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_SPEECHRECOGNITIONRESULT.fields_by_name[ - "alternatives" -].message_type = _SPEECHRECOGNITIONALTERNATIVE -_SPEECHRECOGNITIONALTERNATIVE.fields_by_name["words"].message_type = _WORDINFO -_WORDINFO.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_WORDINFO.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -DESCRIPTOR.message_types_by_name["RecognizeRequest"] = _RECOGNIZEREQUEST -DESCRIPTOR.message_types_by_name[ - "LongRunningRecognizeRequest" -] = _LONGRUNNINGRECOGNIZEREQUEST -DESCRIPTOR.message_types_by_name[ - "StreamingRecognizeRequest" -] = _STREAMINGRECOGNIZEREQUEST -DESCRIPTOR.message_types_by_name[ - "StreamingRecognitionConfig" -] = _STREAMINGRECOGNITIONCONFIG -DESCRIPTOR.message_types_by_name["RecognitionConfig"] = _RECOGNITIONCONFIG -DESCRIPTOR.message_types_by_name["SpeakerDiarizationConfig"] = _SPEAKERDIARIZATIONCONFIG -DESCRIPTOR.message_types_by_name["RecognitionMetadata"] = _RECOGNITIONMETADATA -DESCRIPTOR.message_types_by_name["SpeechContext"] = _SPEECHCONTEXT -DESCRIPTOR.message_types_by_name["RecognitionAudio"] = _RECOGNITIONAUDIO -DESCRIPTOR.message_types_by_name["RecognizeResponse"] = _RECOGNIZERESPONSE -DESCRIPTOR.message_types_by_name[ - "LongRunningRecognizeResponse" -] = _LONGRUNNINGRECOGNIZERESPONSE -DESCRIPTOR.message_types_by_name[ - "LongRunningRecognizeMetadata" -] = _LONGRUNNINGRECOGNIZEMETADATA -DESCRIPTOR.message_types_by_name[ - "StreamingRecognizeResponse" -] = _STREAMINGRECOGNIZERESPONSE -DESCRIPTOR.message_types_by_name[ - "StreamingRecognitionResult" -] = _STREAMINGRECOGNITIONRESULT -DESCRIPTOR.message_types_by_name["SpeechRecognitionResult"] = _SPEECHRECOGNITIONRESULT -DESCRIPTOR.message_types_by_name[ - "SpeechRecognitionAlternative" -] = _SPEECHRECOGNITIONALTERNATIVE -DESCRIPTOR.message_types_by_name["WordInfo"] = _WORDINFO -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -RecognizeRequest = _reflection.GeneratedProtocolMessageType( - "RecognizeRequest", - (_message.Message,), - { - "DESCRIPTOR": _RECOGNIZEREQUEST, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """The top-level message sent by the client for the ``Recognize`` method. - - Attributes: - config: - Required. Provides information to the recognizer that - specifies how to process the request. - audio: - Required. The audio data to be recognized. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.RecognizeRequest) - }, -) -_sym_db.RegisterMessage(RecognizeRequest) - -LongRunningRecognizeRequest = _reflection.GeneratedProtocolMessageType( - "LongRunningRecognizeRequest", - (_message.Message,), - { - "DESCRIPTOR": _LONGRUNNINGRECOGNIZEREQUEST, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """The top-level message sent by the client for the - ``LongRunningRecognize`` method. - - Attributes: - config: - Required. Provides information to the recognizer that - specifies how to process the request. - audio: - Required. The audio data to be recognized. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest) - }, -) -_sym_db.RegisterMessage(LongRunningRecognizeRequest) - -StreamingRecognizeRequest = _reflection.GeneratedProtocolMessageType( - "StreamingRecognizeRequest", - (_message.Message,), - { - "DESCRIPTOR": _STREAMINGRECOGNIZEREQUEST, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """The top-level message sent by the client for the - ``StreamingRecognize`` method. Multiple ``StreamingRecognizeRequest`` - messages are sent. The first message must contain a - ``streaming_config`` message and must not contain ``audio_content``. - All subsequent messages must contain ``audio_content`` and must not - contain a ``streaming_config`` message. - - Attributes: - streaming_request: - The streaming request, which is either a streaming config or - audio content. - streaming_config: - Provides information to the recognizer that specifies how to - process the request. The first ``StreamingRecognizeRequest`` - message must contain a ``streaming_config`` message. - audio_content: - The audio data to be recognized. Sequential chunks of audio - data are sent in sequential ``StreamingRecognizeRequest`` - messages. The first ``StreamingRecognizeRequest`` message must - not contain ``audio_content`` data and all subsequent - ``StreamingRecognizeRequest`` messages must contain - ``audio_content`` data. The audio bytes must be encoded as - specified in ``RecognitionConfig``. Note: as with all bytes - fields, proto buffers use a pure binary representation (not - base64). See `content limits `__. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.StreamingRecognizeRequest) - }, -) -_sym_db.RegisterMessage(StreamingRecognizeRequest) - -StreamingRecognitionConfig = _reflection.GeneratedProtocolMessageType( - "StreamingRecognitionConfig", - (_message.Message,), - { - "DESCRIPTOR": _STREAMINGRECOGNITIONCONFIG, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """Provides information to the recognizer that specifies how to process - the request. - - Attributes: - config: - Required. Provides information to the recognizer that - specifies how to process the request. - single_utterance: - If ``false`` or omitted, the recognizer will perform - continuous recognition (continuing to wait for and process - audio even if the user pauses speaking) until the client - closes the input stream (gRPC API) or until the maximum time - limit has been reached. May return multiple - ``StreamingRecognitionResult``\ s with the ``is_final`` flag - set to ``true``. If ``true``, the recognizer will detect a - single spoken utterance. When it detects that the user has - paused or stopped speaking, it will return an - ``END_OF_SINGLE_UTTERANCE`` event and cease recognition. 
It - will return no more than one ``StreamingRecognitionResult`` - with the ``is_final`` flag set to ``true``. - interim_results: - If ``true``, interim results (tentative hypotheses) may be - returned as they become available (these interim results are - indicated with the ``is_final=false`` flag). If ``false`` or - omitted, only ``is_final=true`` result(s) are returned. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.StreamingRecognitionConfig) - }, -) -_sym_db.RegisterMessage(StreamingRecognitionConfig) - -RecognitionConfig = _reflection.GeneratedProtocolMessageType( - "RecognitionConfig", - (_message.Message,), - { - "DESCRIPTOR": _RECOGNITIONCONFIG, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """Provides information to the recognizer that specifies how to process - the request. - - Attributes: - encoding: - Encoding of audio data sent in all ``RecognitionAudio`` - messages. This field is optional for ``FLAC`` and ``WAV`` - audio files and required for all other audio formats. For - details, see [AudioEncoding][google.cloud.speech.v1p1beta1.Rec - ognitionConfig.AudioEncoding]. - sample_rate_hertz: - Sample rate in Hertz of the audio data sent in all - ``RecognitionAudio`` messages. Valid values are: 8000-48000. - 16000 is optimal. For best results, set the sampling rate of - the audio source to 16000 Hz. If that’s not possible, use the - native sample rate of the audio source (instead of re- - sampling). This field is optional for FLAC and WAV audio - files, but is required for all other audio formats. For - details, see [AudioEncoding][google.cloud.speech.v1p1beta1.Rec - ognitionConfig.AudioEncoding]. - audio_channel_count: - The number of channels in the input audio data. ONLY set this - for MULTI-CHANNEL recognition. Valid values for LINEAR16 and - FLAC are ``1``-``8``. Valid values for OGG_OPUS are ‘1’-‘254’. - Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE - is only ``1``. If ``0`` or omitted, defaults to one channel - (mono). Note: We only recognize the first channel by default. - To perform independent recognition on each channel set - ``enable_separate_recognition_per_channel`` to ‘true’. - enable_separate_recognition_per_channel: - This needs to be set to ``true`` explicitly and - ``audio_channel_count`` > 1 to get each channel recognized - separately. The recognition result will contain a - ``channel_tag`` field to state which channel that result - belongs to. If this is not true, we will only recognize the - first channel. The request is billed cumulatively for all - channels recognized: ``audio_channel_count`` multiplied by the - length of the audio. - language_code: - Required. The language of the supplied audio as a `BCP-47 - `__ language - tag. Example: “en-US”. See `Language Support - `__ - for a list of the currently supported language codes. - alternative_language_codes: - A list of up to 3 additional `BCP-47 `__ language tags, listing - possible alternative languages of the supplied audio. See - `Language Support `__ for a list of the currently supported - language codes. If alternative languages are listed, - recognition result will contain recognition in the most likely - language detected including the main language_code. The - recognition result will include the language tag of the - language detected in the audio. Note: This feature is only - supported for Voice Command and Voice Search use cases and - performance may vary for other use cases (e.g., phone call - transcription). 
-        max_alternatives:
-            Maximum number of recognition hypotheses to be returned.
-            Specifically, the maximum number of
-            ``SpeechRecognitionAlternative`` messages within each
-            ``SpeechRecognitionResult``. The server may return fewer than
-            ``max_alternatives``. Valid values are ``0``-``30``. A value
-            of ``0`` or ``1`` will return a maximum of one. If omitted,
-            will return a maximum of one.
-        profanity_filter:
-            If set to ``true``, the server will attempt to filter out
-            profanities, replacing all but the initial character in each
-            filtered word with asterisks, e.g. "f***". If set to ``false``
-            or omitted, profanities won’t be filtered out.
-        adaptation:
-            Speech adaptation configuration improves the accuracy of
-            speech recognition. When speech adaptation is set it
-            supersedes the ``speech_contexts`` field. For more
-            information, see the `speech adaptation `__ documentation.
-        speech_contexts:
-            Array of
-            [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
-            A means to provide context to assist the speech recognition.
-            For more information, see `speech adaptation `__.
-        enable_word_time_offsets:
-            If ``true``, the top result includes a list of words and the
-            start and end time offsets (timestamps) for those words. If
-            ``false``, no word-level time offset information is returned.
-            The default is ``false``.
-        enable_word_confidence:
-            If ``true``, the top result includes a list of words and the
-            confidence for those words. If ``false``, no word-level
-            confidence information is returned. The default is ``false``.
-        enable_automatic_punctuation:
-            If ‘true’, adds punctuation to recognition result hypotheses.
-            This feature is only available in select languages. Setting
-            this for requests in other languages has no effect at all. The
-            default ‘false’ value does not add punctuation to result
-            hypotheses.
-        enable_speaker_diarization:
-            If ‘true’, enables speaker detection for each recognized word
-            in the top alternative of the recognition result using a
-            speaker_tag provided in the WordInfo. Note: Use
-            diarization_config instead.
-        diarization_speaker_count:
-            If set, specifies the estimated number of speakers in the
-            conversation. Defaults to ‘2’. Ignored unless
-            enable_speaker_diarization is set to true. Note: Use
-            diarization_config instead.
-        diarization_config:
-            Config to enable speaker diarization and set additional
-            parameters to make diarization better suited for your
-            application. Note: When this is enabled, we send all the words
-            from the beginning of the audio for the top alternative in
-            every consecutive STREAMING response. This is done in order
-            to improve our speaker tags as our models learn to identify
-            the speakers in the conversation over time. For non-streaming
-            requests, the diarization results will be provided only in the
-            top alternative of the FINAL SpeechRecognitionResult.
-        metadata:
-            Metadata regarding this request.
-        model:
-            Which model to select for the given request. Select the model
-            best suited to your domain to get best results. If a model is
-            not explicitly specified, then we auto-select a model based on
-            the parameters in the RecognitionConfig.
-            ==================  =============================================
-            Model               Description
-            ==================  =============================================
-            command_and_search  Best for short queries such as voice
-                                commands or voice search.
-            phone_call          Best for audio that originated from a phone
-                                call (typically recorded at an 8khz sampling
-                                rate).
-            video               Best for audio that originated from video or
-                                includes multiple speakers. Ideally the
-                                audio is recorded at a 16khz or greater
-                                sampling rate. This is a premium model that
-                                costs more than the standard rate.
-            default             Best for audio that is not one of the
-                                specific audio models. For example,
-                                long-form audio. Ideally the audio is
-                                high-fidelity, recorded at a 16khz or
-                                greater sampling rate.
-            ==================  =============================================
- use_enhanced: - Set to true to use an enhanced model for speech recognition. - If ``use_enhanced`` is set to true and the ``model`` field is - not set, then an appropriate enhanced model is chosen if an - enhanced model exists for the audio. If ``use_enhanced`` is - true and an enhanced version of the specified model does not - exist, then the speech is recognized using the standard - version of the specified model. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.RecognitionConfig) - }, -) -_sym_db.RegisterMessage(RecognitionConfig) - -SpeakerDiarizationConfig = _reflection.GeneratedProtocolMessageType( - "SpeakerDiarizationConfig", - (_message.Message,), - { - "DESCRIPTOR": _SPEAKERDIARIZATIONCONFIG, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """Config to enable speaker diarization. - - Attributes: - enable_speaker_diarization: - If ‘true’, enables speaker detection for each recognized word - in the top alternative of the recognition result using a - speaker_tag provided in the WordInfo. - min_speaker_count: - Minimum number of speakers in the conversation. This range - gives you more flexibility by allowing the system to - automatically determine the correct number of speakers. If not - set, the default value is 2. - max_speaker_count: - Maximum number of speakers in the conversation. This range - gives you more flexibility by allowing the system to - automatically determine the correct number of speakers. If not - set, the default value is 6. - speaker_tag: - Output only. Unused. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig) - }, -) -_sym_db.RegisterMessage(SpeakerDiarizationConfig) - -RecognitionMetadata = _reflection.GeneratedProtocolMessageType( - "RecognitionMetadata", - (_message.Message,), - { - "DESCRIPTOR": _RECOGNITIONMETADATA, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """Description of audio data to be recognized. - - Attributes: - interaction_type: - The use case most closely describing the audio content to be - recognized. - industry_naics_code_of_audio: - The industry vertical to which this speech recognition request - most closely applies. This is most indicative of the topics - contained in the audio. Use the 6-digit NAICS code to identify - the industry vertical - see https://www.naics.com/search/. - microphone_distance: - The audio type that most closely describes the audio being - recognized. - original_media_type: - The original media the speech was recorded on. - recording_device_type: - The type of device the speech was recorded with. - recording_device_name: - The device used to make the recording. Examples ‘Nexus 5X’ or - ‘Polycom SoundStation IP 6000’ or ‘POTS’ or ‘VoIP’ or - ‘Cardioid Microphone’. - original_mime_type: - Mime type of the original audio file. For example - ``audio/m4a``, ``audio/x-alaw-basic``, ``audio/mp3``, - ``audio/3gpp``. A list of possible audio mime types is - maintained at http://www.iana.org/assignments/media- - types/media-types.xhtml#audio - obfuscated_id: - Obfuscated (privacy-protected) ID of the user, to identify - number of unique users using the service. - audio_topic: - Description of the content. Eg. “Recordings of federal supreme - court hearings from 2012”. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.RecognitionMetadata) - }, -) -_sym_db.RegisterMessage(RecognitionMetadata) - -SpeechContext = _reflection.GeneratedProtocolMessageType( - "SpeechContext", - (_message.Message,), - { - "DESCRIPTOR": _SPEECHCONTEXT, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """Provides “hints” to the speech recognizer to favor specific words and - phrases in the results. - - Attributes: - phrases: - A list of strings containing words and phrases “hints” so that - the speech recognition is more likely to recognize them. This - can be used to improve the accuracy for specific words and - phrases, for example, if specific commands are typically - spoken by the user. This can also be used to add additional - words to the vocabulary of the recognizer. See `usage limits - `__. - List items can also be set to classes for groups of words that - represent common concepts that occur in natural language. For - example, rather than providing phrase hints for every month of - the year, using the $MONTH class improves the likelihood of - correctly transcribing audio that includes months. - boost: - Hint Boost. Positive value will increase the probability that - a specific phrase will be recognized over other similar - sounding phrases. The higher the boost, the higher the chance - of false positive recognition as well. Negative boost values - would correspond to anti-biasing. Anti-biasing is not enabled, - so negative boost will simply be ignored. Though ``boost`` can - accept a wide range of positive values, most use cases are - best served with values between 0 and 20. We recommend using a - binary search approach to finding the optimal value for your - use case. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.SpeechContext) - }, -) -_sym_db.RegisterMessage(SpeechContext) - -RecognitionAudio = _reflection.GeneratedProtocolMessageType( - "RecognitionAudio", - (_message.Message,), - { - "DESCRIPTOR": _RECOGNITIONAUDIO, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """Contains audio data in the encoding specified in the - ``RecognitionConfig``. Either ``content`` or ``uri`` must be supplied. - Supplying both or neither returns - [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. - See `content limits `__. - - Attributes: - audio_source: - The audio source, which is either inline content or a Google - Cloud Storage uri. - content: - The audio data bytes encoded as specified in - ``RecognitionConfig``. Note: as with all bytes fields, proto - buffers use a pure binary representation, whereas JSON - representations use base64. - uri: - URI that points to a file that contains audio data bytes as - specified in ``RecognitionConfig``. The file must not be - compressed (for example, gzip). Currently, only Google Cloud - Storage URIs are supported, which must be specified in the - following format: ``gs://bucket_name/object_name`` (other URI - formats return [google.rpc.Code.INVALID_ARGUMENT][google.rpc.C - ode.INVALID_ARGUMENT]). For more information, see `Request - URIs `__. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.RecognitionAudio) - }, -) -_sym_db.RegisterMessage(RecognitionAudio) - -RecognizeResponse = _reflection.GeneratedProtocolMessageType( - "RecognizeResponse", - (_message.Message,), - { - "DESCRIPTOR": _RECOGNIZERESPONSE, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """The only message returned to the client by the ``Recognize`` method. - It contains the result as zero or more sequential - ``SpeechRecognitionResult`` messages. - - Attributes: - results: - Sequential list of transcription results corresponding to - sequential portions of audio. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.RecognizeResponse) - }, -) -_sym_db.RegisterMessage(RecognizeResponse) - -LongRunningRecognizeResponse = _reflection.GeneratedProtocolMessageType( - "LongRunningRecognizeResponse", - (_message.Message,), - { - "DESCRIPTOR": _LONGRUNNINGRECOGNIZERESPONSE, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """The only message returned to the client by the - ``LongRunningRecognize`` method. It contains the result as zero or - more sequential ``SpeechRecognitionResult`` messages. It is included - in the ``result.response`` field of the ``Operation`` returned by the - ``GetOperation`` call of the ``google::longrunning::Operations`` - service. - - Attributes: - results: - Sequential list of transcription results corresponding to - sequential portions of audio. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse) - }, -) -_sym_db.RegisterMessage(LongRunningRecognizeResponse) - -LongRunningRecognizeMetadata = _reflection.GeneratedProtocolMessageType( - "LongRunningRecognizeMetadata", - (_message.Message,), - { - "DESCRIPTOR": _LONGRUNNINGRECOGNIZEMETADATA, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """Describes the progress of a long-running ``LongRunningRecognize`` - call. It is included in the ``metadata`` field of the ``Operation`` - returned by the ``GetOperation`` call of the - ``google::longrunning::Operations`` service. - - Attributes: - progress_percent: - Approximate percentage of audio processed thus far. Guaranteed - to be 100 when the audio is fully processed and the results - are available. - start_time: - Time when the request was received. - last_update_time: - Time of the most recent processing update. - uri: - The URI of the audio file being transcribed. Empty if the - audio was sent as byte content. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata) - }, -) -_sym_db.RegisterMessage(LongRunningRecognizeMetadata) - -StreamingRecognizeResponse = _reflection.GeneratedProtocolMessageType( - "StreamingRecognizeResponse", - (_message.Message,), - { - "DESCRIPTOR": _STREAMINGRECOGNIZERESPONSE, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """\ ``StreamingRecognizeResponse`` is the only message returned to the - client by ``StreamingRecognize``. A series of zero or more - ``StreamingRecognizeResponse`` messages are streamed back to the - client. If there is no recognizable audio, and ``single_utterance`` is - set to false, then no messages are streamed back to the client. - Here’s an example of a series of ten ``StreamingRecognizeResponse``\ s - that might be returned while processing audio: 1. 
results { - alternatives { transcript: “tube” } stability: 0.01 } 2. results { - alternatives { transcript: “to be a” } stability: 0.01 } 3. results { - alternatives { transcript: “to be” } stability: 0.9 } results { - alternatives { transcript: " or not to be" } stability: 0.01 } 4. - results { alternatives { transcript: “to be or not to be” confidence: - 0.92 } alternatives { transcript: “to bee or not to bee” } is_final: - true } 5. results { alternatives { transcript: " that’s" } stability: - 0.01 } 6. results { alternatives { transcript: " that is" } - stability: 0.9 } results { alternatives { transcript: " the - question" } stability: 0.01 } 7. results { alternatives { - transcript: " that is the question" confidence: 0.98 } alternatives - { transcript: " that was the question" } is_final: true } Notes: - - Only two of the above responses #4 and #7 contain final results; - they are indicated by ``is_final: true``. Concatenating these - together generates the full transcript: “to be or not to be that is - the question”. - The others contain interim ``results``. #3 and - #6 contain two interim ``results``: the first portion has a high - stability and is less likely to change; the second portion has a - low stability and is very likely to change. A UI designer might - choose to show only high stability ``results``. - The specific - ``stability`` and ``confidence`` values shown above are only for - illustrative purposes. Actual values may vary. - In each response, - only one of these fields will be set: ``error``, - ``speech_event_type``, or one or more (repeated) ``results``. - - Attributes: - error: - If set, returns a [google.rpc.Status][google.rpc.Status] - message that specifies the error for the operation. - results: - This repeated list contains zero or more results that - correspond to consecutive portions of the audio currently - being processed. It contains zero or one ``is_final=true`` - result (the newly settled portion), followed by zero or more - ``is_final=false`` results (the interim results). - speech_event_type: - Indicates the type of speech event. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.StreamingRecognizeResponse) - }, -) -_sym_db.RegisterMessage(StreamingRecognizeResponse) - -StreamingRecognitionResult = _reflection.GeneratedProtocolMessageType( - "StreamingRecognitionResult", - (_message.Message,), - { - "DESCRIPTOR": _STREAMINGRECOGNITIONRESULT, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """A streaming speech recognition result corresponding to a portion of - the audio that is currently being processed. - - Attributes: - alternatives: - May contain one or more recognition hypotheses (up to the - maximum specified in ``max_alternatives``). These alternatives - are ordered in terms of accuracy, with the top (first) - alternative being the most probable, as ranked by the - recognizer. - is_final: - If ``false``, this ``StreamingRecognitionResult`` represents - an interim result that may change. If ``true``, this is the - final time the speech service will return this particular - ``StreamingRecognitionResult``, the recognizer will not return - any further hypotheses for this portion of the transcript and - corresponding audio. - stability: - An estimate of the likelihood that the recognizer will not - change its guess about this interim result. Values range from - 0.0 (completely unstable) to 1.0 (completely stable). This - field is only provided for interim results - (``is_final=false``). 
The default of 0.0 is a sentinel value - indicating ``stability`` was not set. - result_end_time: - Time offset of the end of this result relative to the - beginning of the audio. - channel_tag: - For multi-channel audio, this is the channel number - corresponding to the recognized result for the audio from that - channel. For audio_channel_count = N, its output values can - range from ‘1’ to ‘N’. - language_code: - Output only. The `BCP-47 `__ language tag of the language - in this result. This language code was detected to have the - most likelihood of being spoken in the audio. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.StreamingRecognitionResult) - }, -) -_sym_db.RegisterMessage(StreamingRecognitionResult) - -SpeechRecognitionResult = _reflection.GeneratedProtocolMessageType( - "SpeechRecognitionResult", - (_message.Message,), - { - "DESCRIPTOR": _SPEECHRECOGNITIONRESULT, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """A speech recognition result corresponding to a portion of the audio. - - Attributes: - alternatives: - May contain one or more recognition hypotheses (up to the - maximum specified in ``max_alternatives``). These alternatives - are ordered in terms of accuracy, with the top (first) - alternative being the most probable, as ranked by the - recognizer. - channel_tag: - For multi-channel audio, this is the channel number - corresponding to the recognized result for the audio from that - channel. For audio_channel_count = N, its output values can - range from ‘1’ to ‘N’. - language_code: - Output only. The `BCP-47 `__ language tag of the language - in this result. This language code was detected to have the - most likelihood of being spoken in the audio. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.SpeechRecognitionResult) - }, -) -_sym_db.RegisterMessage(SpeechRecognitionResult) - -SpeechRecognitionAlternative = _reflection.GeneratedProtocolMessageType( - "SpeechRecognitionAlternative", - (_message.Message,), - { - "DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """Alternative hypotheses (a.k.a. n-best list). - - Attributes: - transcript: - Transcript text representing the words that the user spoke. - confidence: - The confidence estimate between 0.0 and 1.0. A higher number - indicates an estimated greater likelihood that the recognized - words are correct. This field is set only for the top - alternative of a non-streaming result or, of a streaming - result where ``is_final=true``. This field is not guaranteed - to be accurate and users should not rely on it to be always - provided. The default of 0.0 is a sentinel value indicating - ``confidence`` was not set. - words: - A list of word-specific information for each recognized word. - Note: When ``enable_speaker_diarization`` is true, you will - see all the words from the beginning of the audio. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative) - }, -) -_sym_db.RegisterMessage(SpeechRecognitionAlternative) - -WordInfo = _reflection.GeneratedProtocolMessageType( - "WordInfo", - (_message.Message,), - { - "DESCRIPTOR": _WORDINFO, - "__module__": "google.cloud.speech_v1p1beta1.proto.cloud_speech_pb2", - "__doc__": """Word-specific information for recognized words. 
- - Attributes: - start_time: - Time offset relative to the beginning of the audio, and - corresponding to the start of the spoken word. This field is - only set if ``enable_word_time_offsets=true`` and only in the - top hypothesis. This is an experimental feature and the - accuracy of the time offset can vary. - end_time: - Time offset relative to the beginning of the audio, and - corresponding to the end of the spoken word. This field is - only set if ``enable_word_time_offsets=true`` and only in the - top hypothesis. This is an experimental feature and the - accuracy of the time offset can vary. - word: - The word corresponding to this set of information. - confidence: - The confidence estimate between 0.0 and 1.0. A higher number - indicates an estimated greater likelihood that the recognized - words are correct. This field is set only for the top - alternative of a non-streaming result or, of a streaming - result where ``is_final=true``. This field is not guaranteed - to be accurate and users should not rely on it to be always - provided. The default of 0.0 is a sentinel value indicating - ``confidence`` was not set. - speaker_tag: - Output only. A distinct integer value is assigned for every - speaker within the audio. This field specifies which one of - those speakers was detected to have spoken this word. Value - ranges from ‘1’ to diarization_speaker_count. speaker_tag is - set if enable_speaker_diarization = ‘true’ and only in the top - alternative. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.WordInfo) - }, -) -_sym_db.RegisterMessage(WordInfo) - - -DESCRIPTOR._options = None -_RECOGNIZEREQUEST.fields_by_name["config"]._options = None -_RECOGNIZEREQUEST.fields_by_name["audio"]._options = None -_LONGRUNNINGRECOGNIZEREQUEST.fields_by_name["config"]._options = None -_LONGRUNNINGRECOGNIZEREQUEST.fields_by_name["audio"]._options = None -_STREAMINGRECOGNITIONCONFIG.fields_by_name["config"]._options = None -_RECOGNITIONCONFIG.fields_by_name["language_code"]._options = None -_RECOGNITIONCONFIG.fields_by_name["enable_speaker_diarization"]._options = None -_RECOGNITIONCONFIG.fields_by_name["diarization_speaker_count"]._options = None -_SPEAKERDIARIZATIONCONFIG.fields_by_name["speaker_tag"]._options = None -_RECOGNITIONMETADATA.fields_by_name["obfuscated_id"]._options = None -_LONGRUNNINGRECOGNIZEMETADATA.fields_by_name["uri"]._options = None -_STREAMINGRECOGNITIONRESULT.fields_by_name["language_code"]._options = None -_SPEECHRECOGNITIONRESULT.fields_by_name["language_code"]._options = None -_WORDINFO.fields_by_name["speaker_tag"]._options = None - -_SPEECH = _descriptor.ServiceDescriptor( - name="Speech", - full_name="google.cloud.speech.v1p1beta1.Speech", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\025speech.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=4836, - serialized_end=5478, - methods=[ - _descriptor.MethodDescriptor( - name="Recognize", - full_name="google.cloud.speech.v1p1beta1.Speech.Recognize", - index=0, - containing_service=None, - input_type=_RECOGNIZEREQUEST, - output_type=_RECOGNIZERESPONSE, - serialized_options=b'\202\323\344\223\002 "\033/v1p1beta1/speech:recognize:\001*\332A\014config,audio', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="LongRunningRecognize", - full_name="google.cloud.speech.v1p1beta1.Speech.LongRunningRecognize", - index=1, - containing_service=None, - 
input_type=_LONGRUNNINGRECOGNIZEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002+"&/v1p1beta1/speech:longrunningrecognize:\001*\332A\014config,audio\312A<\n\034LongRunningRecognizeResponse\022\034LongRunningRecognizeMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="StreamingRecognize", - full_name="google.cloud.speech.v1p1beta1.Speech.StreamingRecognize", - index=2, - containing_service=None, - input_type=_STREAMINGRECOGNIZEREQUEST, - output_type=_STREAMINGRECOGNIZERESPONSE, - serialized_options=None, - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_SPEECH) - -DESCRIPTOR.services_by_name["Speech"] = _SPEECH - -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/cloud_speech_pb2_grpc.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/cloud_speech_pb2_grpc.py deleted file mode 100644 index a5e3f268c906..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/cloud_speech_pb2_grpc.py +++ /dev/null @@ -1,181 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.speech_v1p1beta1.proto import ( - cloud_speech_pb2 as google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) - - -class SpeechStub(object): - """Service that implements Google Cloud Speech API. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.Recognize = channel.unary_unary( - "/google.cloud.speech.v1p1beta1.Speech/Recognize", - request_serializer=google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.RecognizeRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.RecognizeResponse.FromString, - ) - self.LongRunningRecognize = channel.unary_unary( - "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", - request_serializer=google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.LongRunningRecognizeRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.StreamingRecognize = channel.stream_stream( - "/google.cloud.speech.v1p1beta1.Speech/StreamingRecognize", - request_serializer=google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeResponse.FromString, - ) - - -class SpeechServicer(object): - """Service that implements Google Cloud Speech API. - """ - - def Recognize(self, request, context): - """Performs synchronous speech recognition: receive results after all audio - has been sent and processed. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def LongRunningRecognize(self, request, context): - """Performs asynchronous speech recognition: receive results via the - google.longrunning.Operations interface. 
Returns either an - `Operation.error` or an `Operation.response` which contains - a `LongRunningRecognizeResponse` message. - For more information on asynchronous speech recognition, see the - [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def StreamingRecognize(self, request_iterator, context): - """Performs bidirectional streaming speech recognition: receive results while - sending audio. This method is only available via the gRPC API (not REST). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_SpeechServicer_to_server(servicer, server): - rpc_method_handlers = { - "Recognize": grpc.unary_unary_rpc_method_handler( - servicer.Recognize, - request_deserializer=google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.RecognizeRequest.FromString, - response_serializer=google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.RecognizeResponse.SerializeToString, - ), - "LongRunningRecognize": grpc.unary_unary_rpc_method_handler( - servicer.LongRunningRecognize, - request_deserializer=google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.LongRunningRecognizeRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "StreamingRecognize": grpc.stream_stream_rpc_method_handler( - servicer.StreamingRecognize, - request_deserializer=google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeRequest.FromString, - response_serializer=google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.cloud.speech.v1p1beta1.Speech", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class Speech(object): - """Service that implements Google Cloud Speech API. 
- """ - - @staticmethod - def Recognize( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.speech.v1p1beta1.Speech/Recognize", - google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.RecognizeRequest.SerializeToString, - google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.RecognizeResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def LongRunningRecognize( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", - google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.LongRunningRecognizeRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def StreamingRecognize( - request_iterator, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.stream_stream( - request_iterator, - target, - "/google.cloud.speech.v1p1beta1.Speech/StreamingRecognize", - google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeRequest.SerializeToString, - google_dot_cloud_dot_speech__v1p1beta1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/resource_pb2.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/resource_pb2.py deleted file mode 100644 index 701d9b569fb5..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/resource_pb2.py +++ /dev/null @@ -1,508 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/speech_v1p1beta1/proto/resource.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/speech_v1p1beta1/proto/resource.proto", - package="google.cloud.speech.v1p1beta1", - syntax="proto3", - serialized_options=b"\n!com.google.cloud.speech.v1p1beta1B\023SpeechResourceProtoP\001ZCgoogle.golang.org/genproto/googleapis/cloud/speech/v1p1beta1;speech\370\001\001\242\002\003GCS", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n2google/cloud/speech_v1p1beta1/proto/resource.proto\x12\x1dgoogle.cloud.speech.v1p1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x19google/api/resource.proto"\x83\x02\n\x0b\x43ustomClass\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0f\x63ustom_class_id\x18\x02 \x01(\t\x12\x43\n\x05items\x18\x03 \x03(\x0b\x32\x34.google.cloud.speech.v1p1beta1.CustomClass.ClassItem\x1a\x1a\n\tClassItem\x12\r\n\x05value\x18\x01 \x01(\t:l\xea\x41i\n!speech.googleapis.com/CustomClass\x12\x44projects/{project}/locations/{location}/customClasses/{custom_class}"\xf9\x01\n\tPhraseSet\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x07phrases\x18\x02 \x03(\x0b\x32/.google.cloud.speech.v1p1beta1.PhraseSet.Phrase\x12\r\n\x05\x62oost\x18\x04 \x01(\x02\x1a&\n\x06Phrase\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x62oost\x18\x02 \x01(\x02:e\xea\x41\x62\n\x1fspeech.googleapis.com/PhraseSet\x12?projects/{project}/locations/{location}/phraseSets/{phrase_set}"\x95\x01\n\x10SpeechAdaptation\x12=\n\x0bphrase_sets\x18\x01 \x03(\x0b\x32(.google.cloud.speech.v1p1beta1.PhraseSet\x12\x42\n\x0e\x63ustom_classes\x18\x02 \x03(\x0b\x32*.google.cloud.speech.v1p1beta1.CustomClassB\x88\x01\n!com.google.cloud.speech.v1p1beta1B\x13SpeechResourceProtoP\x01ZCgoogle.golang.org/genproto/googleapis/cloud/speech/v1p1beta1;speech\xf8\x01\x01\xa2\x02\x03GCSb\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - ], -) - - -_CUSTOMCLASS_CLASSITEM = _descriptor.Descriptor( - name="ClassItem", - full_name="google.cloud.speech.v1p1beta1.CustomClass.ClassItem", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.speech.v1p1beta1.CustomClass.ClassItem.value", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=266, - serialized_end=292, -) - -_CUSTOMCLASS = _descriptor.Descriptor( - name="CustomClass", - full_name="google.cloud.speech.v1p1beta1.CustomClass", - filename=None, - file=DESCRIPTOR, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.speech.v1p1beta1.CustomClass.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="custom_class_id", - full_name="google.cloud.speech.v1p1beta1.CustomClass.custom_class_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="items", - full_name="google.cloud.speech.v1p1beta1.CustomClass.items", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CUSTOMCLASS_CLASSITEM,], - enum_types=[], - serialized_options=b"\352Ai\n!speech.googleapis.com/CustomClass\022Dprojects/{project}/locations/{location}/customClasses/{custom_class}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=143, - serialized_end=402, -) - - -_PHRASESET_PHRASE = _descriptor.Descriptor( - name="Phrase", - full_name="google.cloud.speech.v1p1beta1.PhraseSet.Phrase", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.speech.v1p1beta1.PhraseSet.Phrase.value", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="boost", - full_name="google.cloud.speech.v1p1beta1.PhraseSet.Phrase.boost", - index=1, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=513, - serialized_end=551, -) - -_PHRASESET = _descriptor.Descriptor( - name="PhraseSet", - full_name="google.cloud.speech.v1p1beta1.PhraseSet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.speech.v1p1beta1.PhraseSet.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="phrases", - full_name="google.cloud.speech.v1p1beta1.PhraseSet.phrases", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="boost", - full_name="google.cloud.speech.v1p1beta1.PhraseSet.boost", - index=2, - number=4, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_PHRASESET_PHRASE,], - enum_types=[], - serialized_options=b"\352Ab\n\037speech.googleapis.com/PhraseSet\022?projects/{project}/locations/{location}/phraseSets/{phrase_set}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=405, - serialized_end=654, -) - - -_SPEECHADAPTATION = _descriptor.Descriptor( - name="SpeechAdaptation", - full_name="google.cloud.speech.v1p1beta1.SpeechAdaptation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="phrase_sets", - full_name="google.cloud.speech.v1p1beta1.SpeechAdaptation.phrase_sets", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="custom_classes", - full_name="google.cloud.speech.v1p1beta1.SpeechAdaptation.custom_classes", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=657, - serialized_end=806, -) - -_CUSTOMCLASS_CLASSITEM.containing_type = _CUSTOMCLASS -_CUSTOMCLASS.fields_by_name["items"].message_type = _CUSTOMCLASS_CLASSITEM -_PHRASESET_PHRASE.containing_type = _PHRASESET -_PHRASESET.fields_by_name["phrases"].message_type = _PHRASESET_PHRASE -_SPEECHADAPTATION.fields_by_name["phrase_sets"].message_type = _PHRASESET -_SPEECHADAPTATION.fields_by_name["custom_classes"].message_type = _CUSTOMCLASS -DESCRIPTOR.message_types_by_name["CustomClass"] = _CUSTOMCLASS -DESCRIPTOR.message_types_by_name["PhraseSet"] = _PHRASESET -DESCRIPTOR.message_types_by_name["SpeechAdaptation"] = _SPEECHADAPTATION -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CustomClass = _reflection.GeneratedProtocolMessageType( - "CustomClass", - (_message.Message,), - { - "ClassItem": _reflection.GeneratedProtocolMessageType( - 
"ClassItem", - (_message.Message,), - { - "DESCRIPTOR": _CUSTOMCLASS_CLASSITEM, - "__module__": "google.cloud.speech_v1p1beta1.proto.resource_pb2", - "__doc__": """An item of the class. - - Attributes: - value: - The class item’s value. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.CustomClass.ClassItem) - }, - ), - "DESCRIPTOR": _CUSTOMCLASS, - "__module__": "google.cloud.speech_v1p1beta1.proto.resource_pb2", - "__doc__": """A set of words or phrases that represents a common concept likely to - appear in your audio, for example a list of passenger ship names. - CustomClass items can be substituted into placeholders that you set in - PhraseSet phrases. - - Attributes: - name: - The resource name of the custom class. - custom_class_id: - If this custom class is a resource, the custom_class_id is the - resource id of the CustomClass. - items: - A collection of class items. - """, - # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.CustomClass) - }, -) -_sym_db.RegisterMessage(CustomClass) -_sym_db.RegisterMessage(CustomClass.ClassItem) - -PhraseSet = _reflection.GeneratedProtocolMessageType( - "PhraseSet", - (_message.Message,), - { - "Phrase": _reflection.GeneratedProtocolMessageType( - "Phrase", - (_message.Message,), - { - "DESCRIPTOR": _PHRASESET_PHRASE, - "__module__": "google.cloud.speech_v1p1beta1.proto.resource_pb2", - "__doc__": """A phrases containing words and phrase “hints” so that the speech - recognition is more likely to recognize them. This can be used to - improve the accuracy for specific words and phrases, for example, if - specific commands are typically spoken by the user. This can also be - used to add additional words to the vocabulary of the recognizer. See - `usage limits `__. List items can also include pre-built or - custom classes containing groups of words that represent common - concepts that occur in natural language. For example, rather than - providing a phrase hint for every month of the year (e.g. “i was born - in january”, “i was born in febuary”, …), use the pre-built ``$MONTH`` - class improves the likelihood of correctly transcribing audio that - includes months (e.g. "i was born in :math:`month"). To refer to pre- - built classes, use the class' symbol prepended with ``\ ``e.g.``\$MONTH\ ``. To refer to custom classes that were defined inline in the - request, set the class's``\ custom_class_id\ ``to a string unique to - all class resources and inline classes. Then use the class' id wrapped - in $``\ {…}``e.g. "${my-months}". To refer to custom classes - resources, use the class' id wrapped in``\ :math:`{}` (e.g. ``\ {my-months}``). - - Attributes: - value: - The phrase itself. - boost: - Hint Boost. Overrides the boost set at the phrase set level. - Positive value will increase the probability that a specific - phrase will be recognized over other similar sounding phrases. - The higher the boost, the higher the chance of false positive - recognition as well. Negative boost values would correspond to - anti-biasing. Anti-biasing is not enabled, so negative boost - will simply be ignored. Though ``boost`` can accept a wide - range of positive values, most use cases are best served with - values between 0 and 20. We recommend using a binary search - approach to finding the optimal value for your use case. - Speech recognition will skip PhraseSets with a boost value of - 0. 
-    """,
-                # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.PhraseSet.Phrase)
-            },
-        ),
-        "DESCRIPTOR": _PHRASESET,
-        "__module__": "google.cloud.speech_v1p1beta1.proto.resource_pb2",
-        "__doc__": """Provides “hints” to the speech recognizer to favor specific words and
-    phrases in the results.
-
-    Attributes:
-        name:
-            The resource name of the phrase set.
-        phrases:
-            A list of words and phrases.
-        boost:
-            Hint Boost. Positive value will increase the probability that
-            a specific phrase will be recognized over other similar
-            sounding phrases. The higher the boost, the higher the chance
-            of false positive recognition as well. Negative boost values
-            would correspond to anti-biasing. Anti-biasing is not enabled,
-            so negative boost will simply be ignored. Though ``boost`` can
-            accept a wide range of positive values, most use cases are
-            best served with values between 0 (exclusive) and 20. We
-            recommend using a binary search approach to finding the
-            optimal value for your use case. Speech recognition will skip
-            PhraseSets with a boost value of 0.
-    """,
-        # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.PhraseSet)
-    },
-)
-_sym_db.RegisterMessage(PhraseSet)
-_sym_db.RegisterMessage(PhraseSet.Phrase)
-
-SpeechAdaptation = _reflection.GeneratedProtocolMessageType(
-    "SpeechAdaptation",
-    (_message.Message,),
-    {
-        "DESCRIPTOR": _SPEECHADAPTATION,
-        "__module__": "google.cloud.speech_v1p1beta1.proto.resource_pb2",
-        "__doc__": """Speech adaptation configuration.
-
-    Attributes:
-        phrase_sets:
-            A collection of phrase sets. To specify the hints inline,
-            leave the phrase set’s ``name`` blank and fill in the rest of
-            its fields. Any phrase set can use any custom class.
-        custom_classes:
-            A collection of custom classes. To specify the classes inline,
-            leave the class’ ``name`` blank and fill in the rest of its
-            fields, giving it a unique ``custom_class_id``. Refer to the
-            inline defined class in phrase hints by its
-            ``custom_class_id``.
-    """,
-        # @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.SpeechAdaptation)
-    },
-)
-_sym_db.RegisterMessage(SpeechAdaptation)
-
-
-DESCRIPTOR._options = None
-_CUSTOMCLASS._options = None
-_PHRASESET._options = None
-# @@protoc_insertion_point(module_scope)
diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/resource_pb2_grpc.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/resource_pb2_grpc.py
deleted file mode 100644
index 8a9393943bdf..000000000000
--- a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/proto/resource_pb2_grpc.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
-"""Client and server classes corresponding to protobuf-defined services."""
-import grpc
diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/py.typed b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/py.typed
new file mode 100644
index 000000000000..02081c09b9ab
--- /dev/null
+++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-speech package uses inline types.
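The resource messages deleted above survive the migration as proto-plus types (see the new ``types/resource.py`` in this change), so inline adaptation resources are now built with keyword-constructed messages. A minimal sketch follows; the field names match the descriptors above, while the top-level re-export from ``google.cloud.speech_v1p1beta1`` and the sample values are assumptions, not part of this patch:

    # Hypothetical usage sketch of the migrated resource types; assumes
    # the package re-exports them (as the new types/__init__.py suggests).
    from google.cloud import speech_v1p1beta1 as speech

    # An inline custom class: a named group of related words or phrases.
    custom_class = speech.CustomClass(
        custom_class_id="my-months",
        items=[speech.CustomClass.ClassItem(value="january")],
    )

    # A phrase set referencing the custom class by its id, with a boost.
    phrase_set = speech.PhraseSet(
        phrases=[speech.PhraseSet.Phrase(value="${my-months}", boost=10.0)],
    )

    # The adaptation bundles both collections for use in recognition.
    adaptation = speech.SpeechAdaptation(
        phrase_sets=[phrase_set], custom_classes=[custom_class],
    )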
diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/__init__.py new file mode 100644 index 000000000000..42ffdf2bc43d --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-cloud-python-speech/google/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/__init__.py similarity index 66% rename from packages/google-cloud-python-speech/google/__init__.py rename to packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/__init__.py index dd3a9f485275..72f816da4255 100644 --- a/packages/google-cloud-python-speech/google/__init__.py +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/__init__.py @@ -1,22 +1,24 @@ -# Copyright 2018 Google LLC +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil +from .client import SpeechClient +from .async_client import SpeechAsyncClient - __path__ = pkgutil.extend_path(__path__, __name__) +__all__ = ( + "SpeechClient", + "SpeechAsyncClient", +) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/async_client.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/async_client.py new file mode 100644 index 000000000000..c2b596e65151 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/async_client.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, AsyncIterable, AsyncIterator, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.speech_v1p1beta1.types import cloud_speech +from google.rpc import status_pb2 as status # type: ignore + +from .transports.base import SpeechTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import SpeechGrpcAsyncIOTransport +from .client import SpeechClient + + +class SpeechAsyncClient: + """Service that implements Google Cloud Speech API.""" + + _client: SpeechClient + + DEFAULT_ENDPOINT = SpeechClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = SpeechClient.DEFAULT_MTLS_ENDPOINT + + custom_class_path = staticmethod(SpeechClient.custom_class_path) + parse_custom_class_path = staticmethod(SpeechClient.parse_custom_class_path) + phrase_set_path = staticmethod(SpeechClient.phrase_set_path) + parse_phrase_set_path = staticmethod(SpeechClient.parse_phrase_set_path) + + from_service_account_file = SpeechClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(SpeechClient).get_transport_class, type(SpeechClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, SpeechTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the speech client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.SpeechTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
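+
+        Example:
+            A minimal sketch, assuming application default credentials are
+            available in the environment and that ``config`` and ``audio``
+            are pre-built request messages::
+
+                client = SpeechAsyncClient()
+                response = await client.recognize(config=config, audio=audio)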
+ """ + + self._client = SpeechClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def recognize( + self, + request: cloud_speech.RecognizeRequest = None, + *, + config: cloud_speech.RecognitionConfig = None, + audio: cloud_speech.RecognitionAudio = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cloud_speech.RecognizeResponse: + r"""Performs synchronous speech recognition: receive + results after all audio has been sent and processed. + + Args: + request (:class:`~.cloud_speech.RecognizeRequest`): + The request object. The top-level message sent by the + client for the `Recognize` method. + config (:class:`~.cloud_speech.RecognitionConfig`): + Required. Provides information to the + recognizer that specifies how to process + the request. + This corresponds to the ``config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + audio (:class:`~.cloud_speech.RecognitionAudio`): + Required. The audio data to be + recognized. + This corresponds to the ``audio`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cloud_speech.RecognizeResponse: + The only message returned to the client by the + ``Recognize`` method. It contains the result as zero or + more sequential ``SpeechRecognitionResult`` messages. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([config, audio]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = cloud_speech.RecognizeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if config is not None: + request.config = config + if audio is not None: + request.audio = audio + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.recognize, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5000.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def long_running_recognize( + self, + request: cloud_speech.LongRunningRecognizeRequest = None, + *, + config: cloud_speech.RecognitionConfig = None, + audio: cloud_speech.RecognitionAudio = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Performs asynchronous speech recognition: receive results via + the google.longrunning.Operations interface. Returns either an + ``Operation.error`` or an ``Operation.response`` which contains + a ``LongRunningRecognizeResponse`` message. 
For more information
+        on asynchronous speech recognition, see the
+        `how-to <https://cloud.google.com/speech-to-text/docs/async-recognize>`__.
+
+        Args:
+            request (:class:`~.cloud_speech.LongRunningRecognizeRequest`):
+                The request object. The top-level message sent by the
+                client for the `LongRunningRecognize` method.
+            config (:class:`~.cloud_speech.RecognitionConfig`):
+                Required. Provides information to the
+                recognizer that specifies how to process
+                the request.
+                This corresponds to the ``config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            audio (:class:`~.cloud_speech.RecognitionAudio`):
+                Required. The audio data to be
+                recognized.
+                This corresponds to the ``audio`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.cloud_speech.LongRunningRecognizeResponse`:
+                The only message returned to the client by the
+                ``LongRunningRecognize`` method. It contains the result
+                as zero or more sequential ``SpeechRecognitionResult``
+                messages. It is included in the ``result.response``
+                field of the ``Operation`` returned by the
+                ``GetOperation`` call of the
+                ``google::longrunning::Operations`` service.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        if request is not None and any([config, audio]):
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = cloud_speech.LongRunningRecognizeRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if config is not None:
+            request.config = config
+        if audio is not None:
+            request.audio = audio
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.long_running_recognize,
+            default_timeout=5000.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            cloud_speech.LongRunningRecognizeResponse,
+            metadata_type=cloud_speech.LongRunningRecognizeMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def streaming_recognize(
+        self,
+        requests: AsyncIterator[cloud_speech.StreamingRecognizeRequest] = None,
+        *,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> AsyncIterable[cloud_speech.StreamingRecognizeResponse]:
+        r"""Performs bidirectional streaming speech recognition:
+        receive results while sending audio. This method is only
+        available via the gRPC API (not REST).
+
+        Args:
+            requests (AsyncIterator[`~.cloud_speech.StreamingRecognizeRequest`]):
+                The request object AsyncIterator. The top-level message sent by the
+                client for the `StreamingRecognize` method.
Multiple + `StreamingRecognizeRequest` messages are sent. The first + message must contain a `streaming_config` message and + must not contain `audio_content`. All subsequent + messages must contain `audio_content` and must not + contain a `streaming_config` message. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[~.cloud_speech.StreamingRecognizeResponse]: + ``StreamingRecognizeResponse`` is the only message + returned to the client by ``StreamingRecognize``. A + series of zero or more ``StreamingRecognizeResponse`` + messages are streamed back to the client. If there is no + recognizable audio, and ``single_utterance`` is set to + false, then no messages are streamed back to the client. + + Here's an example of a series of ten + ``StreamingRecognizeResponse``\ s that might be returned + while processing audio: + + 1. results { alternatives { transcript: "tube" } + stability: 0.01 } + + 2. results { alternatives { transcript: "to be a" } + stability: 0.01 } + + 3. results { alternatives { transcript: "to be" } + stability: 0.9 } results { alternatives { transcript: + " or not to be" } stability: 0.01 } + + 4. results { alternatives { transcript: "to be or not to + be" confidence: 0.92 } alternatives { transcript: "to + bee or not to bee" } is_final: true } + + 5. results { alternatives { transcript: " that's" } + stability: 0.01 } + + 6. results { alternatives { transcript: " that is" } + stability: 0.9 } results { alternatives { transcript: + " the question" } stability: 0.01 } + + 7. results { alternatives { transcript: " that is the + question" confidence: 0.98 } alternatives { + transcript: " that was the question" } is_final: true + } + + Notes: + + - Only two of the above responses #4 and #7 contain + final results; they are indicated by + ``is_final: true``. Concatenating these together + generates the full transcript: "to be or not to be + that is the question". + + - The others contain interim ``results``. #3 and #6 + contain two interim ``results``: the first portion + has a high stability and is less likely to change; + the second portion has a low stability and is very + likely to change. A UI designer might choose to show + only high stability ``results``. + + - The specific ``stability`` and ``confidence`` values + shown above are only for illustrative purposes. + Actual values may vary. + + - In each response, only one of these fields will be + set: ``error``, ``speech_event_type``, or one or more + (repeated) ``results``. + + """ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.streaming_recognize, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5000.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-speech",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("SpeechAsyncClient",) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/client.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/client.py new file mode 100644 index 000000000000..2d7eaae490d3 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/client.py @@ -0,0 +1,580 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import ( + Callable, + Dict, + Optional, + Iterable, + Iterator, + Sequence, + Tuple, + Type, + Union, +) +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.speech_v1p1beta1.types import cloud_speech +from google.rpc import status_pb2 as status # type: ignore + +from .transports.base import SpeechTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import SpeechGrpcTransport +from .transports.grpc_asyncio import SpeechGrpcAsyncIOTransport + + +class SpeechClientMeta(type): + """Metaclass for the Speech client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[SpeechTransport]] + _transport_registry["grpc"] = SpeechGrpcTransport + _transport_registry["grpc_asyncio"] = SpeechGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[SpeechTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
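+        # For example, get_transport_class("grpc") returns SpeechGrpcTransport,
+        # and get_transport_class() with no label falls back to the first
+        # registered entry, which is also the "grpc" transport.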
+        return next(iter(cls._transport_registry.values()))
+
+
+class SpeechClient(metaclass=SpeechClientMeta):
+    """Service that implements Google Cloud Speech API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "speech.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            SpeechClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @staticmethod
+    def custom_class_path(project: str, location: str, custom_class: str,) -> str:
+        """Return a fully-qualified custom_class string."""
+        return "projects/{project}/locations/{location}/customClasses/{custom_class}".format(
+            project=project, location=location, custom_class=custom_class,
+        )
+
+    @staticmethod
+    def parse_custom_class_path(path: str) -> Dict[str, str]:
+        """Parse a custom_class path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customClasses/(?P<custom_class>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def phrase_set_path(project: str, location: str, phrase_set: str,) -> str:
+        """Return a fully-qualified phrase_set string."""
+        return "projects/{project}/locations/{location}/phraseSets/{phrase_set}".format(
+            project=project, location=location, phrase_set=phrase_set,
+        )
+
+    @staticmethod
+    def parse_phrase_set_path(path: str) -> Dict[str, str]:
+        """Parse a phrase_set path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/phraseSets/(?P<phrase_set>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[credentials.Credentials] = None,
+        transport: Union[str, SpeechTransport, None] = None,
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the speech client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+ transport (Union[str, ~.SpeechTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (client_options_lib.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, SpeechTransport): + # transport is a SpeechTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def recognize( + self, + request: cloud_speech.RecognizeRequest = None, + *, + config: cloud_speech.RecognitionConfig = None, + audio: cloud_speech.RecognitionAudio = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cloud_speech.RecognizeResponse: + r"""Performs synchronous speech recognition: receive + results after all audio has been sent and processed. + + Args: + request (:class:`~.cloud_speech.RecognizeRequest`): + The request object. The top-level message sent by the + client for the `Recognize` method. + config (:class:`~.cloud_speech.RecognitionConfig`): + Required. Provides information to the + recognizer that specifies how to process + the request. + This corresponds to the ``config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + audio (:class:`~.cloud_speech.RecognitionAudio`): + Required. The audio data to be + recognized. + This corresponds to the ``audio`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.cloud_speech.RecognizeResponse: + The only message returned to the client by the + ``Recognize`` method. It contains the result as zero or + more sequential ``SpeechRecognitionResult`` messages. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([config, audio]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a cloud_speech.RecognizeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cloud_speech.RecognizeRequest): + request = cloud_speech.RecognizeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if config is not None: + request.config = config + if audio is not None: + request.audio = audio + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.recognize] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+        return response
+
+    def long_running_recognize(
+        self,
+        request: cloud_speech.LongRunningRecognizeRequest = None,
+        *,
+        config: cloud_speech.RecognitionConfig = None,
+        audio: cloud_speech.RecognitionAudio = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation.Operation:
+        r"""Performs asynchronous speech recognition: receive results via
+        the google.longrunning.Operations interface. Returns either an
+        ``Operation.error`` or an ``Operation.response`` which contains
+        a ``LongRunningRecognizeResponse`` message. For more information
+        on asynchronous speech recognition, see the
+        `how-to <https://cloud.google.com/speech-to-text/docs/async-recognize>`__.
+
+        Args:
+            request (:class:`~.cloud_speech.LongRunningRecognizeRequest`):
+                The request object. The top-level message sent by the
+                client for the `LongRunningRecognize` method.
+            config (:class:`~.cloud_speech.RecognitionConfig`):
+                Required. Provides information to the
+                recognizer that specifies how to process
+                the request.
+                This corresponds to the ``config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            audio (:class:`~.cloud_speech.RecognitionAudio`):
+                Required. The audio data to be
+                recognized.
+                This corresponds to the ``audio`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`~.cloud_speech.LongRunningRecognizeResponse`:
+                The only message returned to the client by the
+                ``LongRunningRecognize`` method. It contains the result
+                as zero or more sequential ``SpeechRecognitionResult``
+                messages. It is included in the ``result.response``
+                field of the ``Operation`` returned by the
+                ``GetOperation`` call of the
+                ``google::longrunning::Operations`` service.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([config, audio])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a cloud_speech.LongRunningRecognizeRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, cloud_speech.LongRunningRecognizeRequest):
+            request = cloud_speech.LongRunningRecognizeRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if config is not None:
+            request.config = config
+        if audio is not None:
+            request.audio = audio
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.long_running_recognize]
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
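+        # (The operation future exposes result() to block for the
+        # LongRunningRecognizeResponse, and metadata for the progress
+        # reported in LongRunningRecognizeMetadata.)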
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            cloud_speech.LongRunningRecognizeResponse,
+            metadata_type=cloud_speech.LongRunningRecognizeMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def streaming_recognize(
+        self,
+        requests: Iterator[cloud_speech.StreamingRecognizeRequest] = None,
+        *,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> Iterable[cloud_speech.StreamingRecognizeResponse]:
+        r"""Performs bidirectional streaming speech recognition:
+        receive results while sending audio. This method is only
+        available via the gRPC API (not REST).
+
+        Args:
+            requests (Iterator[`~.cloud_speech.StreamingRecognizeRequest`]):
+                The request object iterator. The top-level message sent by the
+                client for the `StreamingRecognize` method. Multiple
+                `StreamingRecognizeRequest` messages are sent. The first
+                message must contain a `streaming_config` message and
+                must not contain `audio_content`. All subsequent
+                messages must contain `audio_content` and must not
+                contain a `streaming_config` message.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            Iterable[~.cloud_speech.StreamingRecognizeResponse]:
+                ``StreamingRecognizeResponse`` is the only message
+                returned to the client by ``StreamingRecognize``. A
+                series of zero or more ``StreamingRecognizeResponse``
+                messages are streamed back to the client. If there is no
+                recognizable audio, and ``single_utterance`` is set to
+                false, then no messages are streamed back to the client.
+
+                Here's an example of a series of seven
+                ``StreamingRecognizeResponse``\ s that might be returned
+                while processing audio:
+
+                1. results { alternatives { transcript: "tube" }
+                   stability: 0.01 }
+
+                2. results { alternatives { transcript: "to be a" }
+                   stability: 0.01 }
+
+                3. results { alternatives { transcript: "to be" }
+                   stability: 0.9 } results { alternatives { transcript:
+                   " or not to be" } stability: 0.01 }
+
+                4. results { alternatives { transcript: "to be or not to
+                   be" confidence: 0.92 } alternatives { transcript: "to
+                   bee or not to bee" } is_final: true }
+
+                5. results { alternatives { transcript: " that's" }
+                   stability: 0.01 }
+
+                6. results { alternatives { transcript: " that is" }
+                   stability: 0.9 } results { alternatives { transcript:
+                   " the question" } stability: 0.01 }
+
+                7. results { alternatives { transcript: " that is the
+                   question" confidence: 0.98 } alternatives {
+                   transcript: " that was the question" } is_final: true
+                   }
+
+                Notes:
+
+                -  Only two of the above responses (#4 and #7) contain
+                   final results; they are indicated by
+                   ``is_final: true``. Concatenating these together
+                   generates the full transcript: "to be or not to be
+                   that is the question".
+
+                -  The others contain interim ``results``. #3 and #6
+                   contain two interim ``results``: the first portion
+                   has a high stability and is less likely to change;
+                   the second portion has a low stability and is very
+                   likely to change. A UI designer might choose to show
+                   only high stability ``results``.
+
+                -  The specific ``stability`` and ``confidence`` values
+                   shown above are only for illustrative purposes.
+                   Actual values may vary.
+ + - In each response, only one of these fields will be + set: ``error``, ``speech_event_type``, or one or more + (repeated) ``results``. + + """ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.streaming_recognize] + + # Send the request. + response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-speech",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("SpeechClient",) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/__init__.py new file mode 100644 index 000000000000..3ec5f07eb105 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import SpeechTransport +from .grpc import SpeechGrpcTransport +from .grpc_asyncio import SpeechGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[SpeechTransport]] +_transport_registry["grpc"] = SpeechGrpcTransport +_transport_registry["grpc_asyncio"] = SpeechGrpcAsyncIOTransport + + +__all__ = ( + "SpeechTransport", + "SpeechGrpcTransport", + "SpeechGrpcAsyncIOTransport", +) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/base.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/base.py new file mode 100644 index 000000000000..cb63618230e9 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/base.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
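Since this patch replaces the old GAPIC surface, a brief usage sketch of the new flattened-keyword methods above may help reviewers. This is a minimal, hypothetical example: the bucket URI is made up, and it assumes Application Default Credentials are configured.

```python
from google.cloud import speech_v1p1beta1 as speech

client = speech.SpeechClient()

config = speech.RecognitionConfig(
    encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
    sample_rate_hertz=16000,
    language_code="en-US",
)
audio = speech.RecognitionAudio(uri="gs://my-bucket/audio.raw")  # hypothetical URI

# Either pass a full request object (a dict is coerced to RecognizeRequest)...
response = client.recognize(request={"config": config, "audio": audio})

# ...or the flattened fields; mixing both raises ValueError, per the guard above.
response = client.recognize(config=config, audio=audio)

# Long-running variant: returns a future-like Operation.
operation = client.long_running_recognize(config=config, audio=audio)
print(operation.result(timeout=90).results)
```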
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials  # type: ignore
+
+from google.cloud.speech_v1p1beta1.types import cloud_speech
+from google.longrunning import operations_pb2 as operations  # type: ignore
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-speech",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class SpeechTransport(abc.ABC):
+    """Abstract transport class for Speech."""
+
+    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+    def __init__(
+        self,
+        *,
+        host: str = "speech.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: typing.Optional[str] = None,
+        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+        quota_project_id: typing.Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+        **kwargs,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise exceptions.DuplicateCredentialArgs(
+                "'credentials_file' and 'credentials' are mutually exclusive"
+            )
+
+        if credentials_file is not None:
+            credentials, _ = auth.load_credentials_from_file(
+                credentials_file, scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = auth.default(
+                scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # Lifted into its own function so it can be stubbed out during tests.
+        self._prep_wrapped_messages(client_info)
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
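To make the precedence in ``SpeechTransport.__init__`` easier to scan, here is the same credential-resolution order restated as a standalone sketch. This is illustrative only; the authoritative logic is the constructor above.

```python
from google import auth
from google.api_core import exceptions


def resolve_credentials(credentials=None, credentials_file=None, scopes=None,
                        quota_project_id=None):
    # 1. Explicit credentials and a credentials file are mutually exclusive.
    if credentials and credentials_file:
        raise exceptions.DuplicateCredentialArgs(
            "'credentials_file' and 'credentials' are mutually exclusive"
        )
    # 2. A file, if given, is loaded via google-auth.
    if credentials_file is not None:
        credentials, _ = auth.load_credentials_from_file(
            credentials_file, scopes=scopes, quota_project_id=quota_project_id
        )
    # 3. Otherwise fall back to Application Default Credentials.
    elif credentials is None:
        credentials, _ = auth.default(scopes=scopes, quota_project_id=quota_project_id)
    return credentials
```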
+ self._wrapped_methods = { + self.recognize: gapic_v1.method.wrap_method( + self.recognize, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5000.0, + client_info=client_info, + ), + self.long_running_recognize: gapic_v1.method.wrap_method( + self.long_running_recognize, + default_timeout=5000.0, + client_info=client_info, + ), + self.streaming_recognize: gapic_v1.method.wrap_method( + self.streaming_recognize, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=5000.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def recognize( + self, + ) -> typing.Callable[ + [cloud_speech.RecognizeRequest], + typing.Union[ + cloud_speech.RecognizeResponse, + typing.Awaitable[cloud_speech.RecognizeResponse], + ], + ]: + raise NotImplementedError() + + @property + def long_running_recognize( + self, + ) -> typing.Callable[ + [cloud_speech.LongRunningRecognizeRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def streaming_recognize( + self, + ) -> typing.Callable[ + [cloud_speech.StreamingRecognizeRequest], + typing.Union[ + cloud_speech.StreamingRecognizeResponse, + typing.Awaitable[cloud_speech.StreamingRecognizeResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("SpeechTransport",) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/grpc.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/grpc.py new file mode 100644 index 000000000000..35de4eaa341a --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/grpc.py @@ -0,0 +1,340 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.speech_v1p1beta1.types import cloud_speech +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import SpeechTransport, DEFAULT_CLIENT_INFO + + +class SpeechGrpcTransport(SpeechTransport): + """gRPC backend transport for Speech. 
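The ``retries.Retry`` settings used in ``_prep_wrapped_messages`` above imply an exponentially growing delay between attempts. A rough sketch of the resulting schedule (google.api_core additionally randomizes each sleep, which this ignores):

```python
# Approximate delay schedule for Retry(initial=0.1, maximum=60.0, multiplier=1.3):
# each retry waits multiplier times longer, capped at the maximum.
delay = 0.1
schedule = []
for _ in range(10):  # first ten attempts
    schedule.append(round(delay, 3))
    delay = min(delay * 1.3, 60.0)

print(schedule)
# [0.1, 0.13, 0.169, 0.22, 0.286, 0.371, 0.483, 0.627, 0.816, 1.06] (approximately)
```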
+
+    Service that implements Google Cloud Speech API.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _stubs: Dict[str, Callable]
+
+    def __init__(
+        self,
+        *,
+        host: str = "speech.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Sequence[str] = None,
+        channel: grpc.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
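For reviewers unfamiliar with the deprecated mTLS arguments documented above, a hypothetical ``client_cert_source`` callback might look like this; the PEM file paths are invented.

```python
def client_cert_source():
    # Return (certificate_chain, private_key) as PEM-encoded bytes.
    with open("client_cert.pem", "rb") as cert_file:
        cert = cert_file.read()
    with open("client_key.pem", "rb") as key_file:
        key = key_file.read()
    return cert, key
```

The transport then feeds these bytes to ``grpc.ssl_channel_credentials()``, as the mTLS branch below shows.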
+            self._grpc_channel = channel
+        elif api_mtls_endpoint:
+            warnings.warn(
+                "api_mtls_endpoint and client_cert_source are deprecated",
+                DeprecationWarning,
+            )
+
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "speech.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Create the channel designed to connect to this service.
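A hedged sketch of wiring these pieces together by hand: build a channel with ``create_channel`` above, wrap it in the transport, and hand that to the client (the client constructor accepts a ``transport``, as shown earlier in this patch). This assumes Application Default Credentials are available.

```python
from google.cloud.speech_v1p1beta1 import SpeechClient
from google.cloud.speech_v1p1beta1.services.speech.transports import SpeechGrpcTransport

channel = SpeechGrpcTransport.create_channel("speech.googleapis.com:443")
transport = SpeechGrpcTransport(channel=channel)  # channel wins; credentials are ignored
client = SpeechClient(transport=transport)
```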
+ + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def recognize( + self, + ) -> Callable[[cloud_speech.RecognizeRequest], cloud_speech.RecognizeResponse]: + r"""Return a callable for the recognize method over gRPC. + + Performs synchronous speech recognition: receive + results after all audio has been sent and processed. + + Returns: + Callable[[~.RecognizeRequest], + ~.RecognizeResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "recognize" not in self._stubs: + self._stubs["recognize"] = self.grpc_channel.unary_unary( + "/google.cloud.speech.v1p1beta1.Speech/Recognize", + request_serializer=cloud_speech.RecognizeRequest.serialize, + response_deserializer=cloud_speech.RecognizeResponse.deserialize, + ) + return self._stubs["recognize"] + + @property + def long_running_recognize( + self, + ) -> Callable[[cloud_speech.LongRunningRecognizeRequest], operations.Operation]: + r"""Return a callable for the long running recognize method over gRPC. + + Performs asynchronous speech recognition: receive results via + the google.longrunning.Operations interface. Returns either an + ``Operation.error`` or an ``Operation.response`` which contains + a ``LongRunningRecognizeResponse`` message. For more information + on asynchronous speech recognition, see the + `how-to `__. + + Returns: + Callable[[~.LongRunningRecognizeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "long_running_recognize" not in self._stubs: + self._stubs["long_running_recognize"] = self.grpc_channel.unary_unary( + "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", + request_serializer=cloud_speech.LongRunningRecognizeRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["long_running_recognize"] + + @property + def streaming_recognize( + self, + ) -> Callable[ + [cloud_speech.StreamingRecognizeRequest], + cloud_speech.StreamingRecognizeResponse, + ]: + r"""Return a callable for the streaming recognize method over gRPC. + + Performs bidirectional streaming speech recognition: + receive results while sending audio. This method is only + available via the gRPC API (not REST). + + Returns: + Callable[[~.StreamingRecognizeRequest], + ~.StreamingRecognizeResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
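The ``operations_client`` property above uses a cache-on-instance idiom worth noting. In isolation it looks like this; ``object()`` stands in for the real ``OperationsClient``:

```python
class Cached:
    @property
    def heavy(self):
        # The property always runs, but only builds the value once;
        # later reads return the stashed instance-dict entry.
        if "heavy" not in self.__dict__:
            self.__dict__["heavy"] = object()  # stand-in for OperationsClient
        return self.__dict__["heavy"]


c = Cached()
assert c.heavy is c.heavy  # same object every time
```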
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "streaming_recognize" not in self._stubs:
+            self._stubs["streaming_recognize"] = self.grpc_channel.stream_stream(
+                "/google.cloud.speech.v1p1beta1.Speech/StreamingRecognize",
+                request_serializer=cloud_speech.StreamingRecognizeRequest.serialize,
+                response_deserializer=cloud_speech.StreamingRecognizeResponse.deserialize,
+            )
+        return self._stubs["streaming_recognize"]
+
+
+__all__ = ("SpeechGrpcTransport",)
diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/grpc_asyncio.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/grpc_asyncio.py
new file mode 100644
index 000000000000..439d1b00cf11
--- /dev/null
+++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/services/speech/transports/grpc_asyncio.py
@@ -0,0 +1,344 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google import auth  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.speech_v1p1beta1.types import cloud_speech
+from google.longrunning import operations_pb2 as operations  # type: ignore
+
+from .base import SpeechTransport, DEFAULT_CLIENT_INFO
+from .grpc import SpeechGrpcTransport
+
+
+class SpeechGrpcAsyncIOTransport(SpeechTransport):
+    """gRPC AsyncIO backend transport for Speech.
+
+    Service that implements Google Cloud Speech API.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "speech.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
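Since this file introduces the asyncio transport, a minimal sketch of the async call path it backs may help. This assumes ``SpeechAsyncClient`` is exported from ``services.speech`` as in other microgenerated libraries, plus Application Default Credentials; the GCS URI is made up.

```python
import asyncio

from google.cloud import speech_v1p1beta1 as speech
from google.cloud.speech_v1p1beta1.services.speech import SpeechAsyncClient


async def main():
    client = SpeechAsyncClient()
    config = speech.RecognitionConfig(language_code="en-US")
    audio = speech.RecognitionAudio(uri="gs://my-bucket/audio.flac")  # hypothetical
    response = await client.recognize(config=config, audio=audio)
    for result in response.results:
        print(result.alternatives[0].transcript)


asyncio.run(main())
```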
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "speech.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        quota_project_id=None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+ """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def recognize( + self, + ) -> Callable[ + [cloud_speech.RecognizeRequest], Awaitable[cloud_speech.RecognizeResponse] + ]: + r"""Return a callable for the recognize method over gRPC. + + Performs synchronous speech recognition: receive + results after all audio has been sent and processed. + + Returns: + Callable[[~.RecognizeRequest], + Awaitable[~.RecognizeResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "recognize" not in self._stubs: + self._stubs["recognize"] = self.grpc_channel.unary_unary( + "/google.cloud.speech.v1p1beta1.Speech/Recognize", + request_serializer=cloud_speech.RecognizeRequest.serialize, + response_deserializer=cloud_speech.RecognizeResponse.deserialize, + ) + return self._stubs["recognize"] + + @property + def long_running_recognize( + self, + ) -> Callable[ + [cloud_speech.LongRunningRecognizeRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the long running recognize method over gRPC. + + Performs asynchronous speech recognition: receive results via + the google.longrunning.Operations interface. Returns either an + ``Operation.error`` or an ``Operation.response`` which contains + a ``LongRunningRecognizeResponse`` message. For more information + on asynchronous speech recognition, see the + `how-to `__. + + Returns: + Callable[[~.LongRunningRecognizeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "long_running_recognize" not in self._stubs: + self._stubs["long_running_recognize"] = self.grpc_channel.unary_unary( + "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", + request_serializer=cloud_speech.LongRunningRecognizeRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["long_running_recognize"] + + @property + def streaming_recognize( + self, + ) -> Callable[ + [cloud_speech.StreamingRecognizeRequest], + Awaitable[cloud_speech.StreamingRecognizeResponse], + ]: + r"""Return a callable for the streaming recognize method over gRPC. + + Performs bidirectional streaming speech recognition: + receive results while sending audio. This method is only + available via the gRPC API (not REST). + + Returns: + Callable[[~.StreamingRecognizeRequest], + Awaitable[~.StreamingRecognizeResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "streaming_recognize" not in self._stubs: + self._stubs["streaming_recognize"] = self.grpc_channel.stream_stream( + "/google.cloud.speech.v1p1beta1.Speech/StreamingRecognize", + request_serializer=cloud_speech.StreamingRecognizeRequest.serialize, + response_deserializer=cloud_speech.StreamingRecognizeResponse.deserialize, + ) + return self._stubs["streaming_recognize"] + + +__all__ = ("SpeechGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types.py deleted file mode 100644 index 6b4b13c2a400..000000000000 --- a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.speech_v1p1beta1.proto import cloud_speech_pb2 -from google.cloud.speech_v1p1beta1.proto import resource_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - operations_pb2, - any_pb2, - duration_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [ - cloud_speech_pb2, - resource_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.speech_v1p1beta1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/__init__.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/__init__.py new file mode 100644 index 000000000000..f935f5029a65 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/__init__.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
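The new ``types`` package below contains proto-plus message classes rather than the raw ``_pb2`` modules removed above. One practical consequence, shown in a small sketch with arbitrarily chosen fields, is that they accept both keyword arguments and plain dicts and compare by value:

```python
from google.cloud.speech_v1p1beta1 import types

kwargs_style = types.RecognitionConfig(
    language_code="en-US",
    sample_rate_hertz=16000,
)
dict_style = types.RecognitionConfig(
    {"language_code": "en-US", "sample_rate_hertz": 16000}
)
assert kwargs_style == dict_style  # proto-plus messages compare by value
```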
+# + +from .resource import ( + CustomClass, + PhraseSet, + SpeechAdaptation, +) +from .cloud_speech import ( + RecognizeRequest, + LongRunningRecognizeRequest, + StreamingRecognizeRequest, + StreamingRecognitionConfig, + RecognitionConfig, + SpeakerDiarizationConfig, + RecognitionMetadata, + SpeechContext, + RecognitionAudio, + RecognizeResponse, + LongRunningRecognizeResponse, + LongRunningRecognizeMetadata, + StreamingRecognizeResponse, + StreamingRecognitionResult, + SpeechRecognitionResult, + SpeechRecognitionAlternative, + WordInfo, +) + + +__all__ = ( + "CustomClass", + "PhraseSet", + "SpeechAdaptation", + "RecognizeRequest", + "LongRunningRecognizeRequest", + "StreamingRecognizeRequest", + "StreamingRecognitionConfig", + "RecognitionConfig", + "SpeakerDiarizationConfig", + "RecognitionMetadata", + "SpeechContext", + "RecognitionAudio", + "RecognizeResponse", + "LongRunningRecognizeResponse", + "LongRunningRecognizeMetadata", + "StreamingRecognizeResponse", + "StreamingRecognitionResult", + "SpeechRecognitionResult", + "SpeechRecognitionAlternative", + "WordInfo", +) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/cloud_speech.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/cloud_speech.py new file mode 100644 index 000000000000..e2ea5ff50879 --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/cloud_speech.py @@ -0,0 +1,939 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.speech_v1p1beta1.types import resource +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.rpc import status_pb2 as status # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.speech.v1p1beta1", + manifest={ + "RecognizeRequest", + "LongRunningRecognizeRequest", + "StreamingRecognizeRequest", + "StreamingRecognitionConfig", + "RecognitionConfig", + "SpeakerDiarizationConfig", + "RecognitionMetadata", + "SpeechContext", + "RecognitionAudio", + "RecognizeResponse", + "LongRunningRecognizeResponse", + "LongRunningRecognizeMetadata", + "StreamingRecognizeResponse", + "StreamingRecognitionResult", + "SpeechRecognitionResult", + "SpeechRecognitionAlternative", + "WordInfo", + }, +) + + +class RecognizeRequest(proto.Message): + r"""The top-level message sent by the client for the ``Recognize`` + method. + + Attributes: + config (~.cloud_speech.RecognitionConfig): + Required. Provides information to the + recognizer that specifies how to process the + request. + audio (~.cloud_speech.RecognitionAudio): + Required. The audio data to be recognized. 
+ """ + + config = proto.Field(proto.MESSAGE, number=1, message="RecognitionConfig",) + + audio = proto.Field(proto.MESSAGE, number=2, message="RecognitionAudio",) + + +class LongRunningRecognizeRequest(proto.Message): + r"""The top-level message sent by the client for the + ``LongRunningRecognize`` method. + + Attributes: + config (~.cloud_speech.RecognitionConfig): + Required. Provides information to the + recognizer that specifies how to process the + request. + audio (~.cloud_speech.RecognitionAudio): + Required. The audio data to be recognized. + """ + + config = proto.Field(proto.MESSAGE, number=1, message="RecognitionConfig",) + + audio = proto.Field(proto.MESSAGE, number=2, message="RecognitionAudio",) + + +class StreamingRecognizeRequest(proto.Message): + r"""The top-level message sent by the client for the + ``StreamingRecognize`` method. Multiple + ``StreamingRecognizeRequest`` messages are sent. The first message + must contain a ``streaming_config`` message and must not contain + ``audio_content``. All subsequent messages must contain + ``audio_content`` and must not contain a ``streaming_config`` + message. + + Attributes: + streaming_config (~.cloud_speech.StreamingRecognitionConfig): + Provides information to the recognizer that specifies how to + process the request. The first ``StreamingRecognizeRequest`` + message must contain a ``streaming_config`` message. + audio_content (bytes): + The audio data to be recognized. Sequential chunks of audio + data are sent in sequential ``StreamingRecognizeRequest`` + messages. The first ``StreamingRecognizeRequest`` message + must not contain ``audio_content`` data and all subsequent + ``StreamingRecognizeRequest`` messages must contain + ``audio_content`` data. The audio bytes must be encoded as + specified in ``RecognitionConfig``. Note: as with all bytes + fields, proto buffers use a pure binary representation (not + base64). See `content + limits `__. + """ + + streaming_config = proto.Field( + proto.MESSAGE, + number=1, + oneof="streaming_request", + message="StreamingRecognitionConfig", + ) + + audio_content = proto.Field(proto.BYTES, number=2, oneof="streaming_request") + + +class StreamingRecognitionConfig(proto.Message): + r"""Provides information to the recognizer that specifies how to + process the request. + + Attributes: + config (~.cloud_speech.RecognitionConfig): + Required. Provides information to the + recognizer that specifies how to process the + request. + single_utterance (bool): + If ``false`` or omitted, the recognizer will perform + continuous recognition (continuing to wait for and process + audio even if the user pauses speaking) until the client + closes the input stream (gRPC API) or until the maximum time + limit has been reached. May return multiple + ``StreamingRecognitionResult``\ s with the ``is_final`` flag + set to ``true``. + + If ``true``, the recognizer will detect a single spoken + utterance. When it detects that the user has paused or + stopped speaking, it will return an + ``END_OF_SINGLE_UTTERANCE`` event and cease recognition. It + will return no more than one ``StreamingRecognitionResult`` + with the ``is_final`` flag set to ``true``. + interim_results (bool): + If ``true``, interim results (tentative hypotheses) may be + returned as they become available (these interim results are + indicated with the ``is_final=false`` flag). If ``false`` or + omitted, only ``is_final=true`` result(s) are returned. 
+ """ + + config = proto.Field(proto.MESSAGE, number=1, message="RecognitionConfig",) + + single_utterance = proto.Field(proto.BOOL, number=2) + + interim_results = proto.Field(proto.BOOL, number=3) + + +class RecognitionConfig(proto.Message): + r"""Provides information to the recognizer that specifies how to + process the request. + + Attributes: + encoding (~.cloud_speech.RecognitionConfig.AudioEncoding): + Encoding of audio data sent in all ``RecognitionAudio`` + messages. This field is optional for ``FLAC`` and ``WAV`` + audio files and required for all other audio formats. For + details, see + [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. + sample_rate_hertz (int): + Sample rate in Hertz of the audio data sent in all + ``RecognitionAudio`` messages. Valid values are: 8000-48000. + 16000 is optimal. For best results, set the sampling rate of + the audio source to 16000 Hz. If that's not possible, use + the native sample rate of the audio source (instead of + re-sampling). This field is optional for FLAC and WAV audio + files, but is required for all other audio formats. For + details, see + [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. + audio_channel_count (int): + The number of channels in the input audio data. ONLY set + this for MULTI-CHANNEL recognition. Valid values for + LINEAR16 and FLAC are ``1``-``8``. Valid values for OGG_OPUS + are '1'-'254'. Valid value for MULAW, AMR, AMR_WB and + SPEEX_WITH_HEADER_BYTE is only ``1``. If ``0`` or omitted, + defaults to one channel (mono). Note: We only recognize the + first channel by default. To perform independent recognition + on each channel set + ``enable_separate_recognition_per_channel`` to 'true'. + enable_separate_recognition_per_channel (bool): + This needs to be set to ``true`` explicitly and + ``audio_channel_count`` > 1 to get each channel recognized + separately. The recognition result will contain a + ``channel_tag`` field to state which channel that result + belongs to. If this is not true, we will only recognize the + first channel. The request is billed cumulatively for all + channels recognized: ``audio_channel_count`` multiplied by + the length of the audio. + language_code (str): + Required. The language of the supplied audio as a + `BCP-47 `__ + language tag. Example: "en-US". See `Language + Support `__ + for a list of the currently supported language codes. + alternative_language_codes (Sequence[str]): + A list of up to 3 additional + `BCP-47 `__ + language tags, listing possible alternative languages of the + supplied audio. See `Language + Support `__ + for a list of the currently supported language codes. If + alternative languages are listed, recognition result will + contain recognition in the most likely language detected + including the main language_code. The recognition result + will include the language tag of the language detected in + the audio. Note: This feature is only supported for Voice + Command and Voice Search use cases and performance may vary + for other use cases (e.g., phone call transcription). + max_alternatives (int): + Maximum number of recognition hypotheses to be returned. + Specifically, the maximum number of + ``SpeechRecognitionAlternative`` messages within each + ``SpeechRecognitionResult``. The server may return fewer + than ``max_alternatives``. Valid values are ``0``-``30``. A + value of ``0`` or ``1`` will return a maximum of one. If + omitted, will return a maximum of one. 
+ profanity_filter (bool): + If set to ``true``, the server will attempt to filter out + profanities, replacing all but the initial character in each + filtered word with asterisks, e.g. "f***". If set to + ``false`` or omitted, profanities won't be filtered out. + adaptation (~.resource.SpeechAdaptation): + Speech adaptation configuration improves the accuracy of + speech recognition. When speech adaptation is set it + supersedes the ``speech_contexts`` field. For more + information, see the `speech + adaptation `__ + documentation. + speech_contexts (Sequence[~.cloud_speech.SpeechContext]): + Array of + [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. + A means to provide context to assist the speech recognition. + For more information, see `speech + adaptation `__. + enable_word_time_offsets (bool): + If ``true``, the top result includes a list of words and the + start and end time offsets (timestamps) for those words. If + ``false``, no word-level time offset information is + returned. The default is ``false``. + enable_word_confidence (bool): + If ``true``, the top result includes a list of words and the + confidence for those words. If ``false``, no word-level + confidence information is returned. The default is + ``false``. + enable_automatic_punctuation (bool): + If 'true', adds punctuation to recognition + result hypotheses. This feature is only + available in select languages. Setting this for + requests in other languages has no effect at + all. The default 'false' value does not add + punctuation to result hypotheses. + enable_speaker_diarization (bool): + If 'true', enables speaker detection for each recognized + word in the top alternative of the recognition result using + a speaker_tag provided in the WordInfo. Note: Use + diarization_config instead. + diarization_speaker_count (int): + If set, specifies the estimated number of speakers in the + conversation. Defaults to '2'. Ignored unless + enable_speaker_diarization is set to true. Note: Use + diarization_config instead. + diarization_config (~.cloud_speech.SpeakerDiarizationConfig): + Config to enable speaker diarization and set + additional parameters to make diarization better + suited for your application. Note: When this is + enabled, we send all the words from the + beginning of the audio for the top alternative + in every consecutive STREAMING responses. This + is done in order to improve our speaker tags as + our models learn to identify the speakers in the + conversation over time. For non-streaming + requests, the diarization results will be + provided only in the top alternative of the + FINAL SpeechRecognitionResult. + metadata (~.cloud_speech.RecognitionMetadata): + Metadata regarding this request. + model (str): + Which model to select for the given request. Select the + model best suited to your domain to get best results. If a + model is not explicitly specified, then we auto-select a + model based on the parameters in the RecognitionConfig. + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + +
+            <table>
+              <tr>
+                <td><b>Model</b></td>
+                <td><b>Description</b></td>
+              </tr>
+              <tr>
+                <td><code>command_and_search</code></td>
+                <td>Best for short queries such as voice commands or voice search.</td>
+              </tr>
+              <tr>
+                <td><code>phone_call</code></td>
+                <td>Best for audio that originated from a phone call (typically
+                recorded at an 8khz sampling rate).</td>
+              </tr>
+              <tr>
+                <td><code>video</code></td>
+                <td>Best for audio that originated from video or includes multiple
+                speakers. Ideally the audio is recorded at a 16khz or greater
+                sampling rate. This is a premium model that costs more than the
+                standard rate.</td>
+              </tr>
+              <tr>
+                <td><code>default</code></td>
+                <td>Best for audio that is not one of the specific audio models.
+                For example, long-form audio. Ideally the audio is high-fidelity,
+                recorded at a 16khz or greater sampling rate.</td>
+              </tr>
+            </table>
+        use_enhanced (bool):
+            Set to true to use an enhanced model for speech recognition.
+            If ``use_enhanced`` is set to true and the ``model`` field
+            is not set, then an appropriate enhanced model is chosen if
+            an enhanced model exists for the audio.
+
+            If ``use_enhanced`` is true and an enhanced version of the
+            specified model does not exist, then the speech is
+            recognized using the standard version of the specified
+            model.
+    """
+
+    class AudioEncoding(proto.Enum):
+        r"""The encoding of the audio data sent in the request.
+
+        All encodings support only 1 channel (mono) audio, unless the
+        ``audio_channel_count`` and
+        ``enable_separate_recognition_per_channel`` fields are set.
+
+        For best results, the audio source should be captured and
+        transmitted using a lossless encoding (``FLAC`` or ``LINEAR16``).
+        The accuracy of the speech recognition can be reduced if lossy
+        codecs are used to capture or transmit audio, particularly if
+        background noise is present. Lossy codecs include ``MULAW``,
+        ``AMR``, ``AMR_WB``, ``OGG_OPUS``, ``SPEEX_WITH_HEADER_BYTE``, and
+        ``MP3``.
+
+        The ``FLAC`` and ``WAV`` audio file formats include a header that
+        describes the included audio content. You can request recognition
+        for ``WAV`` files that contain either ``LINEAR16`` or ``MULAW``
+        encoded audio. If you send ``FLAC`` or ``WAV`` audio file format in
+        your request, you do not need to specify an ``AudioEncoding``; the
+        audio encoding format is determined from the file header. If you
+        specify an ``AudioEncoding`` when you send ``FLAC`` or ``WAV``
+        audio, the encoding configuration must match the encoding described
+        in the audio header; otherwise the request returns an
+        [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]
+        error code.
+        """
+        ENCODING_UNSPECIFIED = 0
+        LINEAR16 = 1
+        FLAC = 2
+        MULAW = 3
+        AMR = 4
+        AMR_WB = 5
+        OGG_OPUS = 6
+        SPEEX_WITH_HEADER_BYTE = 7
+        MP3 = 8
+
+    encoding = proto.Field(proto.ENUM, number=1, enum=AudioEncoding,)
+
+    sample_rate_hertz = proto.Field(proto.INT32, number=2)
+
+    audio_channel_count = proto.Field(proto.INT32, number=7)
+
+    enable_separate_recognition_per_channel = proto.Field(proto.BOOL, number=12)
+
+    language_code = proto.Field(proto.STRING, number=3)
+
+    alternative_language_codes = proto.RepeatedField(proto.STRING, number=18)
+
+    max_alternatives = proto.Field(proto.INT32, number=4)
+
+    profanity_filter = proto.Field(proto.BOOL, number=5)
+
+    adaptation = proto.Field(
+        proto.MESSAGE, number=20, message=resource.SpeechAdaptation,
+    )
+
+    speech_contexts = proto.RepeatedField(
+        proto.MESSAGE, number=6, message="SpeechContext",
+    )
+
+    enable_word_time_offsets = proto.Field(proto.BOOL, number=8)
+
+    enable_word_confidence = proto.Field(proto.BOOL, number=15)
+
+    enable_automatic_punctuation = proto.Field(proto.BOOL, number=11)
+
+    enable_speaker_diarization = proto.Field(proto.BOOL, number=16)
+
+    diarization_speaker_count = proto.Field(proto.INT32, number=17)
+
+    diarization_config = proto.Field(
+        proto.MESSAGE, number=19, message="SpeakerDiarizationConfig",
+    )
+
+    metadata = proto.Field(proto.MESSAGE, number=9, message="RecognitionMetadata",)
+
+    model = proto.Field(proto.STRING, number=13)
+
+    use_enhanced = proto.Field(proto.BOOL, number=14)
+
+
+class SpeakerDiarizationConfig(proto.Message):
+    r"""Config to enable speaker diarization.
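Because ``AudioEncoding`` above is a nested proto-plus enum, it can be referenced through the message class; a quick sketch:

```python
from google.cloud import speech_v1p1beta1 as speech

encoding = speech.RecognitionConfig.AudioEncoding.FLAC
print(encoding.name, encoding.value)  # FLAC 2
```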
+ + Attributes: + enable_speaker_diarization (bool): + If 'true', enables speaker detection for each recognized + word in the top alternative of the recognition result using + a speaker_tag provided in the WordInfo. + min_speaker_count (int): + Minimum number of speakers in the + conversation. This range gives you more + flexibility by allowing the system to + automatically determine the correct number of + speakers. If not set, the default value is 2. + max_speaker_count (int): + Maximum number of speakers in the + conversation. This range gives you more + flexibility by allowing the system to + automatically determine the correct number of + speakers. If not set, the default value is 6. + speaker_tag (int): + Output only. Unused. + """ + + enable_speaker_diarization = proto.Field(proto.BOOL, number=1) + + min_speaker_count = proto.Field(proto.INT32, number=2) + + max_speaker_count = proto.Field(proto.INT32, number=3) + + speaker_tag = proto.Field(proto.INT32, number=5) + + +class RecognitionMetadata(proto.Message): + r"""Description of audio data to be recognized. + + Attributes: + interaction_type (~.cloud_speech.RecognitionMetadata.InteractionType): + The use case most closely describing the + audio content to be recognized. + industry_naics_code_of_audio (int): + The industry vertical to which this speech + recognition request most closely applies. This + is most indicative of the topics contained in + the audio. Use the 6-digit NAICS code to + identify the industry vertical - see + https://www.naics.com/search/. + microphone_distance (~.cloud_speech.RecognitionMetadata.MicrophoneDistance): + The audio type that most closely describes + the audio being recognized. + original_media_type (~.cloud_speech.RecognitionMetadata.OriginalMediaType): + The original media the speech was recorded + on. + recording_device_type (~.cloud_speech.RecognitionMetadata.RecordingDeviceType): + The type of device the speech was recorded + with. + recording_device_name (str): + The device used to make the recording. + Examples: 'Nexus 5X' or 'Polycom SoundStation IP + 6000' or 'POTS' or 'VoIP' or 'Cardioid + Microphone'. + original_mime_type (str): + MIME type of the original audio file. For example + ``audio/m4a``, ``audio/x-alaw-basic``, ``audio/mp3``, + ``audio/3gpp``. A list of possible audio MIME types is + maintained at + http://www.iana.org/assignments/media-types/media-types.xhtml#audio + obfuscated_id (int): + Obfuscated (privacy-protected) ID of the + user, to identify the number of unique users using + the service. + audio_topic (str): + Description of the content. E.g., "Recordings + of federal supreme court hearings from 2012". + """ + + class InteractionType(proto.Enum): + r"""Use case categories that the audio recognition request can be + described by. + """ + INTERACTION_TYPE_UNSPECIFIED = 0 + DISCUSSION = 1 + PRESENTATION = 2 + PHONE_CALL = 3 + VOICEMAIL = 4 + PROFESSIONALLY_PRODUCED = 5 + VOICE_SEARCH = 6 + VOICE_COMMAND = 7 + DICTATION = 8 + + class MicrophoneDistance(proto.Enum): + r"""Enumerates the types of capture settings describing an audio + file.
+ """ + MICROPHONE_DISTANCE_UNSPECIFIED = 0 + NEARFIELD = 1 + MIDFIELD = 2 + FARFIELD = 3 + + class OriginalMediaType(proto.Enum): + r"""The original media the speech was recorded on.""" + ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0 + AUDIO = 1 + VIDEO = 2 + + class RecordingDeviceType(proto.Enum): + r"""The type of device the speech was recorded with.""" + RECORDING_DEVICE_TYPE_UNSPECIFIED = 0 + SMARTPHONE = 1 + PC = 2 + PHONE_LINE = 3 + VEHICLE = 4 + OTHER_OUTDOOR_DEVICE = 5 + OTHER_INDOOR_DEVICE = 6 + + interaction_type = proto.Field(proto.ENUM, number=1, enum=InteractionType,) + + industry_naics_code_of_audio = proto.Field(proto.UINT32, number=3) + + microphone_distance = proto.Field(proto.ENUM, number=4, enum=MicrophoneDistance,) + + original_media_type = proto.Field(proto.ENUM, number=5, enum=OriginalMediaType,) + + recording_device_type = proto.Field(proto.ENUM, number=6, enum=RecordingDeviceType,) + + recording_device_name = proto.Field(proto.STRING, number=7) + + original_mime_type = proto.Field(proto.STRING, number=8) + + obfuscated_id = proto.Field(proto.INT64, number=9) + + audio_topic = proto.Field(proto.STRING, number=10) + + +class SpeechContext(proto.Message): + r"""Provides "hints" to the speech recognizer to favor specific + words and phrases in the results. + + Attributes: + phrases (Sequence[str]): + A list of strings containing words and phrases "hints" so + that the speech recognition is more likely to recognize + them. This can be used to improve the accuracy for specific + words and phrases, for example, if specific commands are + typically spoken by the user. This can also be used to add + additional words to the vocabulary of the recognizer. See + `usage + limits `__. + + List items can also be set to classes for groups of words + that represent common concepts that occur in natural + language. For example, rather than providing phrase hints + for every month of the year, using the $MONTH class improves + the likelihood of correctly transcribing audio that includes + months. + boost (float): + Hint Boost. Positive value will increase the probability + that a specific phrase will be recognized over other similar + sounding phrases. The higher the boost, the higher the + chance of false positive recognition as well. Negative boost + values would correspond to anti-biasing. Anti-biasing is not + enabled, so negative boost will simply be ignored. Though + ``boost`` can accept a wide range of positive values, most + use cases are best served with values between 0 and 20. We + recommend using a binary search approach to finding the + optimal value for your use case. + """ + + phrases = proto.RepeatedField(proto.STRING, number=1) + + boost = proto.Field(proto.FLOAT, number=4) + + +class RecognitionAudio(proto.Message): + r"""Contains audio data in the encoding specified in the + ``RecognitionConfig``. Either ``content`` or ``uri`` must be + supplied. Supplying both or neither returns + [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. + See `content + limits `__. + + Attributes: + content (bytes): + The audio data bytes encoded as specified in + ``RecognitionConfig``. Note: as with all bytes fields, proto + buffers use a pure binary representation, whereas JSON + representations use base64. + uri (str): + URI that points to a file that contains audio data bytes as + specified in ``RecognitionConfig``. The file must not be + compressed (for example, gzip). 
Currently, only Google Cloud + Storage URIs are supported, which must be specified in the + following format: ``gs://bucket_name/object_name`` (other + URI formats return + [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). + For more information, see `Request + URIs `__. + """ + + content = proto.Field(proto.BYTES, number=1, oneof="audio_source") + + uri = proto.Field(proto.STRING, number=2, oneof="audio_source") + + +class RecognizeResponse(proto.Message): + r"""The only message returned to the client by the ``Recognize`` method. + It contains the result as zero or more sequential + ``SpeechRecognitionResult`` messages. + + Attributes: + results (Sequence[~.cloud_speech.SpeechRecognitionResult]): + Sequential list of transcription results + corresponding to sequential portions of audio. + """ + + results = proto.RepeatedField( + proto.MESSAGE, number=2, message="SpeechRecognitionResult", + ) + + +class LongRunningRecognizeResponse(proto.Message): + r"""The only message returned to the client by the + ``LongRunningRecognize`` method. It contains the result as zero or + more sequential ``SpeechRecognitionResult`` messages. It is included + in the ``result.response`` field of the ``Operation`` returned by + the ``GetOperation`` call of the ``google::longrunning::Operations`` + service. + + Attributes: + results (Sequence[~.cloud_speech.SpeechRecognitionResult]): + Sequential list of transcription results + corresponding to sequential portions of audio. + """ + + results = proto.RepeatedField( + proto.MESSAGE, number=2, message="SpeechRecognitionResult", + ) + + +class LongRunningRecognizeMetadata(proto.Message): + r"""Describes the progress of a long-running ``LongRunningRecognize`` + call. It is included in the ``metadata`` field of the ``Operation`` + returned by the ``GetOperation`` call of the + ``google::longrunning::Operations`` service. + + Attributes: + progress_percent (int): + Approximate percentage of audio processed + thus far. Guaranteed to be 100 when the audio is + fully processed and the results are available. + start_time (~.timestamp.Timestamp): + Time when the request was received. + last_update_time (~.timestamp.Timestamp): + Time of the most recent processing update. + uri (str): + The URI of the audio file being transcribed. + Empty if the audio was sent as byte content. + """ + + progress_percent = proto.Field(proto.INT32, number=1) + + start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + last_update_time = proto.Field( + proto.MESSAGE, number=3, message=timestamp.Timestamp, + ) + + uri = proto.Field(proto.STRING, number=4) + + +class StreamingRecognizeResponse(proto.Message): + r"""``StreamingRecognizeResponse`` is the only message returned to the + client by ``StreamingRecognize``. A series of zero or more + ``StreamingRecognizeResponse`` messages is streamed back to the + client. If there is no recognizable audio, and ``single_utterance`` + is set to false, then no messages are streamed back to the client. + + Here's an example of a series of seven + ``StreamingRecognizeResponse``\ s that might be returned while + processing audio: + + 1. results { alternatives { transcript: "tube" } stability: 0.01 } + + 2. results { alternatives { transcript: "to be a" } stability: 0.01 + } + + 3. results { alternatives { transcript: "to be" } stability: 0.9 } + results { alternatives { transcript: " or not to be" } stability: + 0.01 } + + 4.
results { alternatives { transcript: "to be or not to be" + confidence: 0.92 } alternatives { transcript: "to bee or not to + bee" } is_final: true } + + 5. results { alternatives { transcript: " that's" } stability: 0.01 + } + + 6. results { alternatives { transcript: " that is" } stability: 0.9 + } results { alternatives { transcript: " the question" } + stability: 0.01 } + + 7. results { alternatives { transcript: " that is the question" + confidence: 0.98 } alternatives { transcript: " that was the + question" } is_final: true } + + Notes: + + - Only two of the above responses, #4 and #7, contain final results; + they are indicated by ``is_final: true``. Concatenating these + together generates the full transcript: "to be or not to be that + is the question". + + - The others contain interim ``results``. #3 and #6 contain two + interim ``results``: the first portion has a high stability and + is less likely to change; the second portion has a low stability + and is very likely to change. A UI designer might choose to show + only high stability ``results``. + + - The specific ``stability`` and ``confidence`` values shown above + are only for illustrative purposes. Actual values may vary. + + - In each response, only one of these fields will be set: + ``error``, ``speech_event_type``, or one or more (repeated) + ``results``. + + Attributes: + error (~.status.Status): + If set, returns a [google.rpc.Status][google.rpc.Status] + message that specifies the error for the operation. + results (Sequence[~.cloud_speech.StreamingRecognitionResult]): + This repeated list contains zero or more results that + correspond to consecutive portions of the audio currently + being processed. It contains zero or one ``is_final=true`` + result (the newly settled portion), followed by zero or more + ``is_final=false`` results (the interim results). + speech_event_type (~.cloud_speech.StreamingRecognizeResponse.SpeechEventType): + Indicates the type of speech event. + """ + + class SpeechEventType(proto.Enum): + r"""Indicates the type of speech event.""" + SPEECH_EVENT_UNSPECIFIED = 0 + END_OF_SINGLE_UTTERANCE = 1 + + error = proto.Field(proto.MESSAGE, number=1, message=status.Status,) + + results = proto.RepeatedField( + proto.MESSAGE, number=2, message="StreamingRecognitionResult", + ) + + speech_event_type = proto.Field(proto.ENUM, number=4, enum=SpeechEventType,) + + +class StreamingRecognitionResult(proto.Message): + r"""A streaming speech recognition result corresponding to a + portion of the audio that is currently being processed. + + Attributes: + alternatives (Sequence[~.cloud_speech.SpeechRecognitionAlternative]): + May contain one or more recognition hypotheses (up to the + maximum specified in ``max_alternatives``). These + alternatives are ordered in terms of accuracy, with the top + (first) alternative being the most probable, as ranked by + the recognizer. + is_final (bool): + If ``false``, this ``StreamingRecognitionResult`` represents + an interim result that may change. If ``true``, this is the + final time the speech service will return this particular + ``StreamingRecognitionResult``; the recognizer will not + return any further hypotheses for this portion of the + transcript and corresponding audio. + stability (float): + An estimate of the likelihood that the recognizer will not + change its guess about this interim result. Values range + from 0.0 (completely unstable) to 1.0 (completely stable). + This field is only provided for interim results + (``is_final=false``).
The default of 0.0 is a sentinel value + indicating ``stability`` was not set. + result_end_time (~.duration.Duration): + Time offset of the end of this result + relative to the beginning of the audio. + channel_tag (int): + For multi-channel audio, this is the channel number + corresponding to the recognized result for the audio from + that channel. For audio_channel_count = N, its output values + can range from '1' to 'N'. + language_code (str): + Output only. The + `BCP-47 `__ + language tag of the language in this result. This language + code was detected to have the most likelihood of being + spoken in the audio. + """ + + alternatives = proto.RepeatedField( + proto.MESSAGE, number=1, message="SpeechRecognitionAlternative", + ) + + is_final = proto.Field(proto.BOOL, number=2) + + stability = proto.Field(proto.FLOAT, number=3) + + result_end_time = proto.Field(proto.MESSAGE, number=4, message=duration.Duration,) + + channel_tag = proto.Field(proto.INT32, number=5) + + language_code = proto.Field(proto.STRING, number=6) + + +class SpeechRecognitionResult(proto.Message): + r"""A speech recognition result corresponding to a portion of the + audio. + + Attributes: + alternatives (Sequence[~.cloud_speech.SpeechRecognitionAlternative]): + May contain one or more recognition hypotheses (up to the + maximum specified in ``max_alternatives``). These + alternatives are ordered in terms of accuracy, with the top + (first) alternative being the most probable, as ranked by + the recognizer. + channel_tag (int): + For multi-channel audio, this is the channel number + corresponding to the recognized result for the audio from + that channel. For audio_channel_count = N, its output values + can range from '1' to 'N'. + language_code (str): + Output only. The + `BCP-47 `__ + language tag of the language in this result. This language + code was detected to have the most likelihood of being + spoken in the audio. + """ + + alternatives = proto.RepeatedField( + proto.MESSAGE, number=1, message="SpeechRecognitionAlternative", + ) + + channel_tag = proto.Field(proto.INT32, number=2) + + language_code = proto.Field(proto.STRING, number=5) + + +class SpeechRecognitionAlternative(proto.Message): + r"""Alternative hypotheses (a.k.a. n-best list). + + Attributes: + transcript (str): + Transcript text representing the words that + the user spoke. + confidence (float): + The confidence estimate between 0.0 and 1.0. A higher number + indicates an estimated greater likelihood that the + recognized words are correct. This field is set only for the + top alternative of a non-streaming result or of a streaming + result where ``is_final=true``. This field is not guaranteed + to be accurate and users should not rely on it to always be + provided. The default of 0.0 is a sentinel value indicating + ``confidence`` was not set. + words (Sequence[~.cloud_speech.WordInfo]): + A list of word-specific information for each recognized + word. Note: When ``enable_speaker_diarization`` is true, you + will see all the words from the beginning of the audio. + """ + + transcript = proto.Field(proto.STRING, number=1) + + confidence = proto.Field(proto.FLOAT, number=2) + + words = proto.RepeatedField(proto.MESSAGE, number=3, message="WordInfo",) + + +class WordInfo(proto.Message): + r"""Word-specific information for recognized words. + + Attributes: + start_time (~.duration.Duration): + Time offset relative to the beginning of the audio, and + corresponding to the start of the spoken word.
This field is + only set if ``enable_word_time_offsets=true`` and only in + the top hypothesis. This is an experimental feature and the + accuracy of the time offset can vary. + end_time (~.duration.Duration): + Time offset relative to the beginning of the audio, and + corresponding to the end of the spoken word. This field is + only set if ``enable_word_time_offsets=true`` and only in + the top hypothesis. This is an experimental feature and the + accuracy of the time offset can vary. + word (str): + The word corresponding to this set of + information. + confidence (float): + The confidence estimate between 0.0 and 1.0. A higher number + indicates an estimated greater likelihood that the + recognized words are correct. This field is set only for the + top alternative of a non-streaming result or of a streaming + result where ``is_final=true``. This field is not guaranteed + to be accurate and users should not rely on it to always be + provided. The default of 0.0 is a sentinel value indicating + ``confidence`` was not set. + speaker_tag (int): + Output only. A distinct integer value is assigned for every + speaker within the audio. This field specifies which one of + those speakers was detected to have spoken this word. Value + ranges from '1' to diarization_speaker_count. speaker_tag is + set if enable_speaker_diarization = 'true' and only in the + top alternative. + """ + + start_time = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,) + + end_time = proto.Field(proto.MESSAGE, number=2, message=duration.Duration,) + + word = proto.Field(proto.STRING, number=3) + + confidence = proto.Field(proto.FLOAT, number=4) + + speaker_tag = proto.Field(proto.INT32, number=5) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/resource.py b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/resource.py new file mode 100644 index 000000000000..3eeba9317f4a --- /dev/null +++ b/packages/google-cloud-python-speech/google/cloud/speech_v1p1beta1/types/resource.py @@ -0,0 +1,157 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.speech.v1p1beta1", + manifest={"CustomClass", "PhraseSet", "SpeechAdaptation",}, +) + + +class CustomClass(proto.Message): + r"""A set of words or phrases that represents a common concept + likely to appear in your audio, for example, a list of passenger + ship names. CustomClass items can be substituted into + placeholders that you set in PhraseSet phrases. + + Attributes: + name (str): + The resource name of the custom class. + custom_class_id (str): + If this custom class is a resource, the custom_class_id is + the resource id of the CustomClass. + items (Sequence[~.resource.CustomClass.ClassItem]): + A collection of class items. + """ + + class ClassItem(proto.Message): + r"""An item of the class.
+ + Attributes: + value (str): + The class item's value. + """ + + value = proto.Field(proto.STRING, number=1) + + name = proto.Field(proto.STRING, number=1) + + custom_class_id = proto.Field(proto.STRING, number=2) + + items = proto.RepeatedField(proto.MESSAGE, number=3, message=ClassItem,) + + +class PhraseSet(proto.Message): + r"""Provides "hints" to the speech recognizer to favor specific + words and phrases in the results. + + Attributes: + name (str): + The resource name of the phrase set. + phrases (Sequence[~.resource.PhraseSet.Phrase]): + A list of words and phrases. + boost (float): + Hint Boost. Positive value will increase the probability + that a specific phrase will be recognized over other similar + sounding phrases. The higher the boost, the higher the + chance of false positive recognition as well. Negative boost + values would correspond to anti-biasing. Anti-biasing is not + enabled, so negative boost will simply be ignored. Though + ``boost`` can accept a wide range of positive values, most + use cases are best served with values between 0 (exclusive) + and 20. We recommend using a binary search approach to + finding the optimal value for your use case. Speech + recognition will skip PhraseSets with a boost value of 0. + """ + + class Phrase(proto.Message): + r"""A phrase containing words and phrase "hints" so that the speech + recognition is more likely to recognize them. This can be used to + improve the accuracy for specific words and phrases, for example, if + specific commands are typically spoken by the user. This can also be + used to add additional words to the vocabulary of the recognizer. + See `usage + limits `__. + + List items can also include pre-built or custom classes containing + groups of words that represent common concepts that occur in natural + language. For example, rather than providing a phrase hint for every + month of the year (e.g. "i was born in january", "i was born in + february", ...), using the pre-built ``$MONTH`` class improves the + likelihood of correctly transcribing audio that includes months + (e.g. "i was born in $month"). To refer to pre-built classes, use + the class' symbol prepended with ``$`` e.g. ``$MONTH``. To refer to + custom classes that were defined inline in the request, set the + class's ``custom_class_id`` to a string unique to all class + resources and inline classes. Then use the class' id wrapped in + $\ ``{...}`` e.g. "${my-months}". To refer to custom class + resources, use the class' id wrapped in ``${}`` (e.g. + ``${my-months}``). + + Attributes: + value (str): + The phrase itself. + boost (float): + Hint Boost. Overrides the boost set at the phrase set level. + Positive value will increase the probability that a specific + phrase will be recognized over other similar sounding + phrases. The higher the boost, the higher the chance of + false positive recognition as well. Negative boost values + would correspond to anti-biasing. Anti-biasing is not + enabled, so negative boost will simply be ignored. Though + ``boost`` can accept a wide range of positive values, most + use cases are best served with values between 0 and 20. We + recommend using a binary search approach to finding the + optimal value for your use case. Speech recognition will + skip PhraseSets with a boost value of 0.
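+ + A minimal illustrative sketch (the phrase text and boost value are + assumptions for illustration, not values from the API reference): a + hint can be built as ``Phrase(value="weather in ${my-months}", boost=10.0)`` + and attached to a ``PhraseSet`` through its ``phrases`` field.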
+ """ + + value = proto.Field(proto.STRING, number=1) + + boost = proto.Field(proto.FLOAT, number=2) + + name = proto.Field(proto.STRING, number=1) + + phrases = proto.RepeatedField(proto.MESSAGE, number=2, message=Phrase,) + + boost = proto.Field(proto.FLOAT, number=4) + + +class SpeechAdaptation(proto.Message): + r"""Speech adaptation configuration. + + Attributes: + phrase_sets (Sequence[~.resource.PhraseSet]): + A collection of phrase sets. To specify the hints inline, + leave the phrase set's ``name`` blank and fill in the rest + of its fields. Any phrase set can use any custom class. + custom_classes (Sequence[~.resource.CustomClass]): + A collection of custom classes. To specify the classes + inline, leave the class' ``name`` blank and fill in the rest + of its fields, giving it a unique ``custom_class_id``. Refer + to the inline defined class in phrase hints by its + ``custom_class_id``. + """ + + phrase_sets = proto.RepeatedField(proto.MESSAGE, number=1, message=PhraseSet,) + + custom_classes = proto.RepeatedField(proto.MESSAGE, number=2, message=CustomClass,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-python-speech/mypy.ini b/packages/google-cloud-python-speech/mypy.ini new file mode 100644 index 000000000000..4505b485436b --- /dev/null +++ b/packages/google-cloud-python-speech/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/packages/google-cloud-python-speech/noxfile.py b/packages/google-cloud-python-speech/noxfile.py index b1d07cb6a033..c9434be47f52 100644 --- a/packages/google-cloud-python-speech/noxfile.py +++ b/packages/google-cloud-python-speech/noxfile.py @@ -27,8 +27,8 @@ BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" -SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"] +SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -70,6 +70,8 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. + session.install("asyncmock", "pytest-asyncio") + session.install("mock", "pytest", "pytest-cov") session.install("-e", ".") @@ -139,7 +141,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. """ session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=87") + session.run("coverage", "report", "--show-missing", "--fail-under=99") session.run("coverage", "erase") @@ -149,7 +151,7 @@ def docs(session): """Build the docs for this library.""" session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") + session.install("sphinx", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( @@ -171,7 +173,9 @@ def docfx(session): """Build the docfx yaml files for this library.""" session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark", "sphinx-docfx-yaml") + # sphinx-docfx-yaml supports up to sphinx version 1.5.5. 
+ # https://github.com/docascode/sphinx-docfx-yaml/issues/97 + session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_infinite.py b/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_infinite.py index 759842b476d0..d6aafde2783d 100644 --- a/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_infinite.py +++ b/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_infinite.py @@ -41,9 +41,9 @@ SAMPLE_RATE = 16000 CHUNK_SIZE = int(SAMPLE_RATE / 10) # 100ms -RED = '\033[0;31m' -GREEN = '\033[0;32m' -YELLOW = '\033[0;33m' +RED = "\033[0;31m" +GREEN = "\033[0;32m" +YELLOW = "\033[0;33m" def get_current_time(): @@ -123,12 +123,14 @@ def generator(self): if self.bridging_offset > self.final_request_end_time: self.bridging_offset = self.final_request_end_time - chunks_from_ms = round((self.final_request_end_time - - self.bridging_offset) / chunk_time) + chunks_from_ms = round( + (self.final_request_end_time - self.bridging_offset) + / chunk_time + ) - self.bridging_offset = (round(( - len(self.last_audio_input) - chunks_from_ms) - * chunk_time)) + self.bridging_offset = round( + (len(self.last_audio_input) - chunks_from_ms) * chunk_time + ) for i in range(chunks_from_ms, len(self.last_audio_input)): data.append(self.last_audio_input[i]) @@ -157,7 +159,7 @@ def generator(self): except queue.Empty: break - yield b''.join(data) + yield b"".join(data) def listen_print_loop(responses, stream): @@ -201,35 +203,37 @@ def listen_print_loop(responses, stream): if result.result_end_time.nanos: result_nanos = result.result_end_time.nanos - stream.result_end_time = int((result_seconds * 1000) - + (result_nanos / 1000000)) + stream.result_end_time = int((result_seconds * 1000) + (result_nanos / 1000000)) - corrected_time = (stream.result_end_time - stream.bridging_offset - + (STREAMING_LIMIT * stream.restart_counter)) + corrected_time = ( + stream.result_end_time + - stream.bridging_offset + + (STREAMING_LIMIT * stream.restart_counter) + ) # Display interim results, but with a carriage return at the end of the # line, so subsequent lines will overwrite them. if result.is_final: sys.stdout.write(GREEN) - sys.stdout.write('\033[K') - sys.stdout.write(str(corrected_time) + ': ' + transcript + '\n') + sys.stdout.write("\033[K") + sys.stdout.write(str(corrected_time) + ": " + transcript + "\n") stream.is_final_end_time = stream.result_end_time stream.last_transcript_was_final = True # Exit recognition if any of the transcribed phrases could be # one of our keywords. 
- if re.search(r'\b(exit|quit)\b', transcript, re.I): + if re.search(r"\b(exit|quit)\b", transcript, re.I): sys.stdout.write(YELLOW) - sys.stdout.write('Exiting...\n') + sys.stdout.write("Exiting...\n") stream.closed = True break else: sys.stdout.write(RED) - sys.stdout.write('\033[K') - sys.stdout.write(str(corrected_time) + ': ' + transcript + '\r') + sys.stdout.write("\033[K") + sys.stdout.write(str(corrected_time) + ": " + transcript + "\r") stream.last_transcript_was_final = False @@ -238,37 +242,42 @@ def main(): """start bidirectional streaming from microphone input to speech API""" client = speech.SpeechClient() - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=SAMPLE_RATE, - language_code='en-US', - max_alternatives=1) - streaming_config = speech.types.StreamingRecognitionConfig( - config=config, - interim_results=True) + language_code="en-US", + max_alternatives=1, + ) + streaming_config = speech.StreamingRecognitionConfig( + config=config, interim_results=True + ) mic_manager = ResumableMicrophoneStream(SAMPLE_RATE, CHUNK_SIZE) print(mic_manager.chunk_size) sys.stdout.write(YELLOW) sys.stdout.write('\nListening, say "Quit" or "Exit" to stop.\n\n') - sys.stdout.write('End (ms) Transcript Results/Status\n') - sys.stdout.write('=====================================================\n') + sys.stdout.write("End (ms) Transcript Results/Status\n") + sys.stdout.write("=====================================================\n") with mic_manager as stream: while not stream.closed: sys.stdout.write(YELLOW) - sys.stdout.write('\n' + str( - STREAMING_LIMIT * stream.restart_counter) + ': NEW REQUEST\n') + sys.stdout.write( + "\n" + str(STREAMING_LIMIT * stream.restart_counter) + ": NEW REQUEST\n" + ) stream.audio_input = [] audio_generator = stream.generator() - requests = (speech.types.StreamingRecognizeRequest( - audio_content=content)for content in audio_generator) + requests = ( + speech.StreamingRecognizeRequest(audio_content=content) + for content in audio_generator + ) - responses = client.streaming_recognize(streaming_config, - requests) + responses = client.streaming_recognize( + requests=requests, config=streaming_config + ) # Now, put the transcription responses to use. 
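# Note on the migrated call above: in the 2.x microgenerated surface, # streaming_recognize is a handwritten helper that accepts keyword # arguments and sends the config as the first StreamingRecognizeRequest, # followed by the audio-content requests.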
listen_print_loop(responses, stream) @@ -282,11 +291,11 @@ def main(): stream.restart_counter = stream.restart_counter + 1 if not stream.last_transcript_was_final: - sys.stdout.write('\n') + sys.stdout.write("\n") stream.new_stream = True -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_mic.py b/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_mic.py index 3ca7b7094124..b484a10e23e3 100644 --- a/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_mic.py +++ b/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_mic.py @@ -32,8 +32,6 @@ import sys from google.cloud import speech -from google.cloud.speech import enums -from google.cloud.speech import types import pyaudio from six.moves import queue @@ -44,6 +42,7 @@ class MicrophoneStream(object): """Opens a recording stream as a generator yielding the audio chunks.""" + def __init__(self, rate, chunk): self._rate = rate self._chunk = chunk @@ -58,8 +57,10 @@ def __enter__(self): format=pyaudio.paInt16, # The API currently only supports 1-channel (mono) audio # https://goo.gl/z757pE - channels=1, rate=self._rate, - input=True, frames_per_buffer=self._chunk, + channels=1, + rate=self._rate, + input=True, + frames_per_buffer=self._chunk, # Run the audio stream asynchronously to fill the buffer object. # This is necessary so that the input device's buffer doesn't # overflow while the calling thread makes network requests, etc. @@ -104,7 +105,7 @@ def generator(self): except queue.Empty: break - yield b''.join(data) + yield b"".join(data) def listen_print_loop(responses): @@ -142,10 +143,10 @@ def listen_print_loop(responses): # # If the previous result was longer than this one, we need to print # some extra spaces to overwrite the previous result - overwrite_chars = ' ' * (num_chars_printed - len(transcript)) + overwrite_chars = " " * (num_chars_printed - len(transcript)) if not result.is_final: - sys.stdout.write(transcript + overwrite_chars + '\r') + sys.stdout.write(transcript + overwrite_chars + "\r") sys.stdout.flush() num_chars_printed = len(transcript) @@ -155,8 +156,8 @@ def listen_print_loop(responses): # Exit recognition if any of the transcribed phrases could be # one of our keywords. - if re.search(r'\b(exit|quit)\b', transcript, re.I): - print('Exiting..') + if re.search(r"\b(exit|quit)\b", transcript, re.I): + print("Exiting..") break num_chars_printed = 0 @@ -165,28 +166,33 @@ def listen_print_loop(responses): def main(): # See http://g.co/cloud/speech/docs/languages # for a list of supported languages. 
- language_code = 'en-US' # a BCP-47 language tag + language_code = "en-US" # a BCP-47 language tag client = speech.SpeechClient() - config = types.RecognitionConfig( - encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=RATE, - language_code=language_code) - streaming_config = types.StreamingRecognitionConfig( - config=config, - interim_results=True) + language_code=language_code, + ) + streaming_config = speech.StreamingRecognitionConfig( + config=config, interim_results=True + ) with MicrophoneStream(RATE, CHUNK) as stream: audio_generator = stream.generator() - requests = (types.StreamingRecognizeRequest(audio_content=content) - for content in audio_generator) + requests = ( + speech.StreamingRecognizeRequest(audio_content=content) + for content in audio_generator + ) - responses = client.streaming_recognize(streaming_config, requests) + responses = client.streaming_recognize( + requests=requests, config=streaming_config + ) # Now, put the transcription responses to use. listen_print_loop(responses) -if __name__ == '__main__': +if __name__ == "__main__": main() # [END speech_transcribe_streaming_mic] diff --git a/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_mic_test.py b/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_mic_test.py index dd5e7ea6f5e6..f5e08f5d30b2 100644 --- a/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_mic_test.py +++ b/packages/google-cloud-python-speech/samples/microphone/transcribe_streaming_mic_test.py @@ -18,7 +18,7 @@ import mock -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") class MockPyAudio(object): @@ -32,8 +32,9 @@ def open(self, stream_callback, rate, *args, **kwargs): self.rate = rate self.closed = threading.Event() self.stream_thread = threading.Thread( - target=self.stream_audio, args=( - self.audio_filename, stream_callback, self.closed)) + target=self.stream_audio, + args=(self.audio_filename, stream_callback, self.closed), + ) self.stream_thread.start() return self @@ -47,23 +48,25 @@ def terminate(self): pass def stream_audio(self, audio_filename, callback, closed, num_frames=512): - with open(audio_filename, 'rb') as audio_file: + with open(audio_filename, "rb") as audio_file: while not closed.is_set(): # Approximate realtime by sleeping for the appropriate time for # the requested number of frames time.sleep(num_frames / float(self.rate)) # audio is 16-bit samples, whereas python byte is 8-bit num_bytes = 2 * num_frames - chunk = audio_file.read(num_bytes) or b'\0' * num_bytes + chunk = audio_file.read(num_bytes) or b"\0" * num_bytes callback(chunk, None, None, None) -@mock.patch.dict('sys.modules', pyaudio=mock.MagicMock( - PyAudio=MockPyAudio(os.path.join(RESOURCES, 'quit.raw')))) +@mock.patch.dict( + "sys.modules", + pyaudio=mock.MagicMock(PyAudio=MockPyAudio(os.path.join(RESOURCES, "quit.raw"))), +) def test_main(capsys): import transcribe_streaming_mic transcribe_streaming_mic.main() out, err = capsys.readouterr() - assert re.search(r'quit', out, re.DOTALL | re.I) + assert re.search(r"quit", out, re.DOTALL | re.I) diff --git a/packages/google-cloud-python-speech/samples/snippets/beta_snippets.py b/packages/google-cloud-python-speech/samples/snippets/beta_snippets.py index 79d9c3d587eb..eaafe3ca978e 100644 --- 
a/packages/google-cloud-python-speech/samples/snippets/beta_snippets.py +++ b/packages/google-cloud-python-speech/samples/snippets/beta_snippets.py @@ -35,29 +35,31 @@ def transcribe_file_with_enhanced_model(): """Transcribe the given audio file using an enhanced model.""" # [START speech_transcribe_enhanced_model_beta] from google.cloud import speech_v1p1beta1 as speech + client = speech.SpeechClient() - speech_file = 'resources/commercial_mono.wav' + speech_file = "resources/commercial_mono.wav" - with io.open(speech_file, 'rb') as audio_file: + with io.open(speech_file, "rb") as audio_file: content = audio_file.read() - audio = speech.types.RecognitionAudio(content=content) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + audio = speech.RecognitionAudio(content=content) + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=8000, - language_code='en-US', + language_code="en-US", use_enhanced=True, # A model must be specified to use enhanced model. - model='phone_call') + model="phone_call", + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print(u'First alternative of result {}'.format(i)) - print(u'Transcript: {}'.format(alternative.transcript)) + print("-" * 20) + print(f"First alternative of result {i}") + print(f"Transcript: {alternative.transcript}") # [END speech_transcribe_enhanced_model_beta] @@ -65,44 +67,47 @@ def transcribe_file_with_metadata(): """Send a request that includes recognition metadata.""" # [START speech_transcribe_recognition_metadata_beta] from google.cloud import speech_v1p1beta1 as speech + client = speech.SpeechClient() - speech_file = 'resources/commercial_mono.wav' + speech_file = "resources/commercial_mono.wav" - with io.open(speech_file, 'rb') as audio_file: + with io.open(speech_file, "rb") as audio_file: content = audio_file.read() # Here we construct a recognition metadata object. # Most metadata fields are specified as enums that can be found - # in speech.enums.RecognitionMetadata - metadata = speech.types.RecognitionMetadata() - metadata.interaction_type = ( - speech.enums.RecognitionMetadata.InteractionType.DISCUSSION) + # in speech.RecognitionMetadata + metadata = speech.RecognitionMetadata() + metadata.interaction_type = speech.RecognitionMetadata.InteractionType.DISCUSSION metadata.microphone_distance = ( - speech.enums.RecognitionMetadata.MicrophoneDistance.NEARFIELD) + speech.RecognitionMetadata.MicrophoneDistance.NEARFIELD + ) metadata.recording_device_type = ( - speech.enums.RecognitionMetadata.RecordingDeviceType.SMARTPHONE) + speech.RecognitionMetadata.RecordingDeviceType.SMARTPHONE + ) # Some metadata fields are free form strings metadata.recording_device_name = "Pixel 2 XL" # And some are integers, for instance the 6 digit NAICS code # https://www.naics.com/search/ metadata.industry_naics_code_of_audio = 519190 - audio = speech.types.RecognitionAudio(content=content) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + audio = speech.RecognitionAudio(content=content) + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=8000, - language_code='en-US', + language_code="en-US", # Add this in the request to send metadata. 
- metadata=metadata) + metadata=metadata, + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print(u'First alternative of result {}'.format(i)) - print(u'Transcript: {}'.format(alternative.transcript)) + print("-" * 20) + print("First alternative of result {}".format(i)) + print("Transcript: {}".format(alternative.transcript)) # [END speech_transcribe_recognition_metadata_beta] @@ -110,28 +115,30 @@ def transcribe_file_with_auto_punctuation(): """Transcribe the given audio file with auto punctuation enabled.""" # [START speech_transcribe_auto_punctuation_beta] from google.cloud import speech_v1p1beta1 as speech + client = speech.SpeechClient() - speech_file = 'resources/commercial_mono.wav' + speech_file = "resources/commercial_mono.wav" - with io.open(speech_file, 'rb') as audio_file: + with io.open(speech_file, "rb") as audio_file: content = audio_file.read() - audio = speech.types.RecognitionAudio(content=content) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + audio = speech.RecognitionAudio(content=content) + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=8000, - language_code='en-US', + language_code="en-US", # Enable automatic punctuation - enable_automatic_punctuation=True) + enable_automatic_punctuation=True, + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print(u'First alternative of result {}'.format(i)) - print(u'Transcript: {}'.format(alternative.transcript)) + print("-" * 20) + print("First alternative of result {}".format(i)) + print("Transcript: {}".format(alternative.transcript)) # [END speech_transcribe_auto_punctuation_beta] @@ -139,24 +146,26 @@ def transcribe_file_with_diarization(): """Transcribe the given audio file synchronously with diarization.""" # [START speech_transcribe_diarization_beta] from google.cloud import speech_v1p1beta1 as speech + client = speech.SpeechClient() - speech_file = 'resources/commercial_mono.wav' + speech_file = "resources/commercial_mono.wav" - with open(speech_file, 'rb') as audio_file: + with open(speech_file, "rb") as audio_file: content = audio_file.read() - audio = speech.types.RecognitionAudio(content=content) + audio = speech.RecognitionAudio(content=content) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=8000, - language_code='en-US', + language_code="en-US", enable_speaker_diarization=True, - diarization_speaker_count=2) + diarization_speaker_count=2, + ) - print('Waiting for operation to complete...') - response = client.recognize(config, audio) + print("Waiting for operation to complete...") + response = client.recognize(request={"config": config, "audio": audio}) # The transcript within each result is separate and sequential per result. 
# However, the words list within an alternative includes all the words @@ -168,8 +177,7 @@ def transcribe_file_with_diarization(): # Printing out the output: for word_info in words_info: - print(u"word: '{}', speaker_tag: {}".format( - word_info.word, word_info.speaker_tag)) + print(f"word: '{word_info.word}', speaker_tag: {word_info.speaker_tag}") # [END speech_transcribe_diarization_beta] @@ -178,30 +186,32 @@ def transcribe_file_with_multichannel(): multi channel.""" # [START speech_transcribe_multichannel_beta] from google.cloud import speech_v1p1beta1 as speech + client = speech.SpeechClient() - speech_file = 'resources/Google_Gnome.wav' + speech_file = "resources/Google_Gnome.wav" - with open(speech_file, 'rb') as audio_file: + with open(speech_file, "rb") as audio_file: content = audio_file.read() - audio = speech.types.RecognitionAudio(content=content) + audio = speech.RecognitionAudio(content=content) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=16000, - language_code='en-US', + language_code="en-US", audio_channel_count=1, - enable_separate_recognition_per_channel=True) + enable_separate_recognition_per_channel=True, + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print('First alternative of result {}'.format(i)) - print(u'Transcript: {}'.format(alternative.transcript)) - print(u'Channel Tag: {}'.format(result.channel_tag)) + print("-" * 20) + print("First alternative of result {}".format(i)) + print("Transcript: {}".format(alternative.transcript)) + print("Channel Tag: {}".format(result.channel_tag)) # [END speech_transcribe_multichannel_beta] @@ -210,32 +220,34 @@ def transcribe_file_with_multilanguage(): multi language.""" # [START speech_transcribe_multilanguage_beta] from google.cloud import speech_v1p1beta1 as speech + client = speech.SpeechClient() - speech_file = 'resources/multi.wav' - first_lang = 'en-US' - second_lang = 'es' + speech_file = "resources/multi.wav" + first_lang = "en-US" + second_lang = "es" - with open(speech_file, 'rb') as audio_file: + with open(speech_file, "rb") as audio_file: content = audio_file.read() - audio = speech.types.RecognitionAudio(content=content) + audio = speech.RecognitionAudio(content=content) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=44100, audio_channel_count=2, language_code=first_lang, - alternative_language_codes=[second_lang]) + alternative_language_codes=[second_lang], + ) - print('Waiting for operation to complete...') - response = client.recognize(config, audio) + print("Waiting for operation to complete...") + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print(u'First alternative of result {}: {}'.format(i, alternative)) - print(u'Transcript: {}'.format(alternative.transcript)) + print("-" * 20) + print("First alternative of result {}: {}".format(i, alternative)) + print("Transcript: {}".format(alternative.transcript)) # [END speech_transcribe_multilanguage_beta] @@ 
-244,52 +256,57 @@ def transcribe_file_with_word_level_confidence(): word level confidence.""" # [START speech_transcribe_word_level_confidence_beta] from google.cloud import speech_v1p1beta1 as speech + client = speech.SpeechClient() - speech_file = 'resources/Google_Gnome.wav' + speech_file = "resources/Google_Gnome.wav" - with open(speech_file, 'rb') as audio_file: + with open(speech_file, "rb") as audio_file: content = audio_file.read() - audio = speech.types.RecognitionAudio(content=content) + audio = speech.RecognitionAudio(content=content) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=16000, - language_code='en-US', - enable_word_confidence=True) + language_code="en-US", + enable_word_confidence=True, + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print('First alternative of result {}'.format(i)) - print(u'Transcript: {}'.format(alternative.transcript)) - print(u'First Word and Confidence: ({}, {})'.format( - alternative.words[0].word, alternative.words[0].confidence)) + print("-" * 20) + print("First alternative of result {}".format(i)) + print("Transcript: {}".format(alternative.transcript)) + print( + "First Word and Confidence: ({}, {})".format( + alternative.words[0].word, alternative.words[0].confidence + ) + ) # [END speech_transcribe_word_level_confidence_beta] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('command') + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("command") args = parser.parse_args() - if args.command == 'enhanced-model': + if args.command == "enhanced-model": transcribe_file_with_enhanced_model() - elif args.command == 'metadata': + elif args.command == "metadata": transcribe_file_with_metadata() - elif args.command == 'punctuation': + elif args.command == "punctuation": transcribe_file_with_auto_punctuation() - elif args.command == 'diarization': + elif args.command == "diarization": transcribe_file_with_diarization() - elif args.command == 'multi-channel': + elif args.command == "multi-channel": transcribe_file_with_multichannel() - elif args.command == 'multi-language': + elif args.command == "multi-language": transcribe_file_with_multilanguage() - elif args.command == 'word-level-conf': + elif args.command == "word-level-conf": transcribe_file_with_word_level_confidence() diff --git a/packages/google-cloud-python-speech/samples/snippets/beta_snippets_test.py b/packages/google-cloud-python-speech/samples/snippets/beta_snippets_test.py index 367d2ccc4b1b..d1242df50bd7 100644 --- a/packages/google-cloud-python-speech/samples/snippets/beta_snippets_test.py +++ b/packages/google-cloud-python-speech/samples/snippets/beta_snippets_test.py @@ -20,30 +20,31 @@ transcribe_file_with_metadata, transcribe_file_with_multichannel, transcribe_file_with_multilanguage, - transcribe_file_with_word_level_confidence) + transcribe_file_with_word_level_confidence, +) -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") def 
test_transcribe_file_with_enhanced_model(capsys): transcribe_file_with_enhanced_model() out, _ = capsys.readouterr() - assert 'Chrome' in out + assert "Chrome" in out def test_transcribe_file_with_metadata(capsys): transcribe_file_with_metadata() out, _ = capsys.readouterr() - assert 'Chrome' in out + assert "Chrome" in out def test_transcribe_file_with_auto_punctuation(capsys): transcribe_file_with_auto_punctuation() out, _ = capsys.readouterr() - assert 'First alternative of result ' in out + assert "First alternative of result " in out def test_transcribe_diarization(capsys): @@ -58,18 +59,18 @@ def test_transcribe_multichannel_file(capsys): transcribe_file_with_multichannel() out, err = capsys.readouterr() - assert 'OK Google stream stranger things from Netflix to my TV' in out + assert "OK Google stream stranger things from Netflix to my TV" in out def test_transcribe_multilanguage_file(capsys): transcribe_file_with_multilanguage() out, err = capsys.readouterr() - assert 'how are you doing estoy bien e tu' in out + assert "how are you doing estoy bien e tu" in out def test_transcribe_word_level_confidence(capsys): transcribe_file_with_word_level_confidence() out, err = capsys.readouterr() - assert 'OK Google stream stranger things from Netflix to my TV' in out + assert "OK Google stream stranger things from Netflix to my TV" in out diff --git a/packages/google-cloud-python-speech/samples/snippets/quickstart.py b/packages/google-cloud-python-speech/samples/snippets/quickstart.py index f90f52fb04b3..ad0ab3275838 100644 --- a/packages/google-cloud-python-speech/samples/snippets/quickstart.py +++ b/packages/google-cloud-python-speech/samples/snippets/quickstart.py @@ -23,8 +23,7 @@ def run_quickstart(): # Imports the Google Cloud client library # [START speech_python_migration_imports] from google.cloud import speech - from google.cloud.speech import enums - from google.cloud.speech import types + # [END speech_python_migration_imports] # Instantiates a client @@ -33,28 +32,26 @@ def run_quickstart(): # [END speech_python_migration_client] # The name of the audio file to transcribe - file_name = os.path.join( - os.path.dirname(__file__), - 'resources', - 'audio.raw') + file_name = os.path.join(os.path.dirname(__file__), "resources", "audio.raw") # Loads the audio into memory - with io.open(file_name, 'rb') as audio_file: + with io.open(file_name, "rb") as audio_file: content = audio_file.read() - audio = types.RecognitionAudio(content=content) + audio = speech.RecognitionAudio(content=content) - config = types.RecognitionConfig( - encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=16000, - language_code='en-US') + language_code="en-US", + ) # Detects speech in the audio file - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for result in response.results: - print('Transcript: {}'.format(result.alternatives[0].transcript)) + print("Transcript: {}".format(result.alternatives[0].transcript)) # [END speech_quickstart] -if __name__ == '__main__': +if __name__ == "__main__": run_quickstart() diff --git a/packages/google-cloud-python-speech/samples/snippets/quickstart_test.py b/packages/google-cloud-python-speech/samples/snippets/quickstart_test.py index 0675ad195d3a..7fcca1856a79 100644 --- a/packages/google-cloud-python-speech/samples/snippets/quickstart_test.py +++ 
b/packages/google-cloud-python-speech/samples/snippets/quickstart_test.py @@ -19,4 +19,4 @@ def test_quickstart(capsys): quickstart.run_quickstart() out, _ = capsys.readouterr() - assert 'Transcript: how old is the Brooklyn Bridge' in out + assert "Transcript: how old is the Brooklyn Bridge" in out diff --git a/packages/google-cloud-python-speech/samples/snippets/speech_adaptation_beta.py b/packages/google-cloud-python-speech/samples/snippets/speech_adaptation_beta.py index 35e9527fdea8..890bb8ed7284 100644 --- a/packages/google-cloud-python-speech/samples/snippets/speech_adaptation_beta.py +++ b/packages/google-cloud-python-speech/samples/snippets/speech_adaptation_beta.py @@ -26,7 +26,6 @@ # [START speech_adaptation_beta] from google.cloud import speech_v1p1beta1 -from google.cloud.speech_v1p1beta1 import enums def sample_recognize(storage_uri, phrase): @@ -62,7 +61,7 @@ def sample_recognize(storage_uri, phrase): # Encoding of audio data sent. This sample sets this explicitly. # This field is optional for FLAC and WAV audio formats. - encoding = enums.RecognitionConfig.AudioEncoding.MP3 + encoding = speech_v1p1beta1.RecognitionConfig.AudioEncoding.MP3 config = { "speech_contexts": speech_contexts, "sample_rate_hertz": sample_rate_hertz, @@ -71,14 +70,13 @@ def sample_recognize(storage_uri, phrase): } audio = {"uri": storage_uri} - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for result in response.results: # First alternative is the most probable result alternative = result.alternatives[0] print(u"Transcript: {}".format(alternative.transcript)) - -# [END speech_adaptation_beta] + # [END speech_adaptation_beta] return response diff --git a/packages/google-cloud-python-speech/samples/snippets/speech_quickstart_beta.py b/packages/google-cloud-python-speech/samples/snippets/speech_quickstart_beta.py index 431f6d5490c2..ba1efab1a847 100644 --- a/packages/google-cloud-python-speech/samples/snippets/speech_quickstart_beta.py +++ b/packages/google-cloud-python-speech/samples/snippets/speech_quickstart_beta.py @@ -26,7 +26,6 @@ # [START speech_quickstart_beta] from google.cloud import speech_v1p1beta1 -from google.cloud.speech_v1p1beta1 import enums def sample_recognize(storage_uri): @@ -49,7 +48,7 @@ def sample_recognize(storage_uri): # Encoding of audio data sent. This sample sets this explicitly. # This field is optional for FLAC and WAV audio formats. 
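# Note: with the microgenerated library, encoding enums are exposed as # attributes of the message class (RecognitionConfig.AudioEncoding) # rather than through a separate enums module, as the change below shows.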
- encoding = enums.RecognitionConfig.AudioEncoding.MP3 + encoding = speech_v1p1beta1.RecognitionConfig.AudioEncoding.MP3 config = { "language_code": language_code, "sample_rate_hertz": sample_rate_hertz, @@ -57,13 +56,13 @@ def sample_recognize(storage_uri): } audio = {"uri": storage_uri} - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for result in response.results: # First alternative is the most probable result alternative = result.alternatives[0] print(u"Transcript: {}".format(alternative.transcript)) -# [END speech_quickstart_beta] + # [END speech_quickstart_beta] return response diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe.py b/packages/google-cloud-python-speech/samples/snippets/transcribe.py index 1ff446d43652..2cd21ddc3194 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe.py @@ -29,32 +29,34 @@ def transcribe_file(speech_file): """Transcribe the given audio file.""" from google.cloud import speech - from google.cloud.speech import enums - from google.cloud.speech import types import io + client = speech.SpeechClient() # [START speech_python_migration_sync_request] # [START speech_python_migration_config] - with io.open(speech_file, 'rb') as audio_file: + with io.open(speech_file, "rb") as audio_file: content = audio_file.read() - audio = types.RecognitionAudio(content=content) - config = types.RecognitionConfig( - encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16, + audio = speech.RecognitionAudio(content=content) + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=16000, - language_code='en-US') + language_code="en-US", + ) # [END speech_python_migration_config] # [START speech_python_migration_sync_response] - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) # [END speech_python_migration_sync_request] # Each result is for a consecutive portion of the audio. Iterate through # them to get the transcripts for the entire audio file. for result in response.results: # The first alternative is the most likely one for this portion. - print(u'Transcript: {}'.format(result.alternatives[0].transcript)) + print(u"Transcript: {}".format(result.alternatives[0].transcript)) # [END speech_python_migration_sync_response] + + # [END speech_transcribe_sync] @@ -62,35 +64,36 @@ def transcribe_file(speech_file): def transcribe_gcs(gcs_uri): """Transcribes the audio file specified by the gcs_uri.""" from google.cloud import speech - from google.cloud.speech import enums - from google.cloud.speech import types + client = speech.SpeechClient() # [START speech_python_migration_config_gcs] - audio = types.RecognitionAudio(uri=gcs_uri) - config = types.RecognitionConfig( - encoding=enums.RecognitionConfig.AudioEncoding.FLAC, + audio = speech.RecognitionAudio(uri=gcs_uri) + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.FLAC, sample_rate_hertz=16000, - language_code='en-US') + language_code="en-US", + ) # [END speech_python_migration_config_gcs] - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) # Each result is for a consecutive portion of the audio. Iterate through # them to get the transcripts for the entire audio file. 
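As at every call site this patch touches, recognize() now takes a single request argument. The generated client accepts either the proto message classes or plain dicts with the same field names (the samples above use both styles), while retry, timeout, and metadata stay as separate keyword-only controls. A condensed sketch, with the GCS URI invented for illustration:

    from google.cloud import speech

    client = speech.SpeechClient()
    audio = {"uri": "gs://your-bucket/speech/audio.flac"}  # plain dicts are accepted too
    config = {
        "encoding": speech.RecognitionConfig.AudioEncoding.FLAC,
        "sample_rate_hertz": 16000,
        "language_code": "en-US",
    }
    # 1.x equivalent: client.recognize(config, audio)
    response = client.recognize(request={"config": config, "audio": audio}, timeout=90)
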
for result in response.results: # The first alternative is the most likely one for this portion. - print(u'Transcript: {}'.format(result.alternatives[0].transcript)) + print(u"Transcript: {}".format(result.alternatives[0].transcript)) + + # [END speech_transcribe_sync_gcs] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - 'path', help='File or GCS path for audio file to be recognized') + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("path", help="File or GCS path for audio file to be recognized") args = parser.parse_args() - if args.path.startswith('gs://'): + if args.path.startswith("gs://"): transcribe_gcs(args.path) else: transcribe_file(args.path) diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_async.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_async.py index 0f9f5b2dc606..789f2f36edc1 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_async.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_async.py @@ -30,34 +30,38 @@ def transcribe_file(speech_file): """Transcribe the given audio file asynchronously.""" from google.cloud import speech - from google.cloud.speech import enums - from google.cloud.speech import types + client = speech.SpeechClient() # [START speech_python_migration_async_request] - with io.open(speech_file, 'rb') as audio_file: + with io.open(speech_file, "rb") as audio_file: content = audio_file.read() - audio = types.RecognitionAudio(content=content) - config = types.RecognitionConfig( - encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16, + audio = speech.RecognitionAudio(content=content) + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=16000, - language_code='en-US') + language_code="en-US", + ) # [START speech_python_migration_async_response] - operation = client.long_running_recognize(config, audio) + operation = client.long_running_recognize( + request={"config": config, "audio": audio} + ) # [END speech_python_migration_async_request] - print('Waiting for operation to complete...') + print("Waiting for operation to complete...") response = operation.result(timeout=90) # Each result is for a consecutive portion of the audio. Iterate through # them to get the transcripts for the entire audio file. for result in response.results: # The first alternative is the most likely one for this portion. 
- print(u'Transcript: {}'.format(result.alternatives[0].transcript)) - print('Confidence: {}'.format(result.alternatives[0].confidence)) + print(u"Transcript: {}".format(result.alternatives[0].transcript)) + print("Confidence: {}".format(result.alternatives[0].confidence)) # [END speech_python_migration_async_response] + + # [END speech_transcribe_async] @@ -65,38 +69,41 @@ def transcribe_file(speech_file): def transcribe_gcs(gcs_uri): """Asynchronously transcribes the audio file specified by the gcs_uri.""" from google.cloud import speech - from google.cloud.speech import enums - from google.cloud.speech import types + client = speech.SpeechClient() - audio = types.RecognitionAudio(uri=gcs_uri) - config = types.RecognitionConfig( - encoding=enums.RecognitionConfig.AudioEncoding.FLAC, + audio = speech.RecognitionAudio(uri=gcs_uri) + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.FLAC, sample_rate_hertz=16000, - language_code='en-US') + language_code="en-US", + ) - operation = client.long_running_recognize(config, audio) + operation = client.long_running_recognize( + request={"config": config, "audio": audio} + ) - print('Waiting for operation to complete...') + print("Waiting for operation to complete...") response = operation.result(timeout=90) # Each result is for a consecutive portion of the audio. Iterate through # them to get the transcripts for the entire audio file. for result in response.results: # The first alternative is the most likely one for this portion. - print(u'Transcript: {}'.format(result.alternatives[0].transcript)) - print('Confidence: {}'.format(result.alternatives[0].confidence)) + print(u"Transcript: {}".format(result.alternatives[0].transcript)) + print("Confidence: {}".format(result.alternatives[0].confidence)) + + # [END speech_transcribe_async_gcs] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - 'path', help='File or GCS path for audio file to be recognized') + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("path", help="File or GCS path for audio file to be recognized") args = parser.parse_args() - if args.path.startswith('gs://'): + if args.path.startswith("gs://"): transcribe_gcs(args.path) else: transcribe_file(args.path) diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_async_test.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_async_test.py index 7d66747eb446..47d5f8385a78 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_async_test.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_async_test.py @@ -16,20 +16,18 @@ import transcribe_async -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") def test_transcribe(capsys): - transcribe_async.transcribe_file( - os.path.join(RESOURCES, 'audio.raw')) + transcribe_async.transcribe_file(os.path.join(RESOURCES, "audio.raw")) out, err = capsys.readouterr() - assert re.search(r'how old is the Brooklyn Bridge', out, re.DOTALL | re.I) + assert re.search(r"how old is the Brooklyn Bridge", out, re.DOTALL | re.I) def test_transcribe_gcs(capsys): - transcribe_async.transcribe_gcs( - 'gs://python-docs-samples-tests/speech/audio.flac') + transcribe_async.transcribe_gcs("gs://python-docs-samples-tests/speech/audio.flac") 
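long_running_recognize() keeps the same calling convention but returns an operation handle instead of results; the transcription arrives by blocking on operation.result(), as the async samples above do with their 90-second timeout. A condensed sketch using the same test file:

    from google.cloud import speech

    client = speech.SpeechClient()
    audio = speech.RecognitionAudio(uri="gs://python-docs-samples-tests/speech/audio.flac")
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.FLAC,
        sample_rate_hertz=16000,
        language_code="en-US",
    )
    operation = client.long_running_recognize(request={"config": config, "audio": audio})
    response = operation.result(timeout=90)  # blocks until done; raises on timeout or RPC error
    for result in response.results:
        print(result.alternatives[0].transcript)
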
out, err = capsys.readouterr() - assert re.search(r'how old is the Brooklyn Bridge', out, re.DOTALL | re.I) + assert re.search(r"how old is the Brooklyn Bridge", out, re.DOTALL | re.I) diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_auto_punctuation.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_auto_punctuation.py index 4e65afafaf43..106de0f772a3 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_auto_punctuation.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_auto_punctuation.py @@ -29,35 +29,37 @@ def transcribe_file_with_auto_punctuation(path): """Transcribe the given audio file with auto punctuation enabled.""" # [START speech_transcribe_auto_punctuation] from google.cloud import speech + client = speech.SpeechClient() # path = 'resources/commercial_mono.wav' - with io.open(path, 'rb') as audio_file: + with io.open(path, "rb") as audio_file: content = audio_file.read() - audio = speech.types.RecognitionAudio(content=content) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + audio = speech.RecognitionAudio(content=content) + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=8000, - language_code='en-US', + language_code="en-US", # Enable automatic punctuation - enable_automatic_punctuation=True) + enable_automatic_punctuation=True, + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print('First alternative of result {}'.format(i)) - print('Transcript: {}'.format(alternative.transcript)) + print("-" * 20) + print("First alternative of result {}".format(i)) + print("Transcript: {}".format(alternative.transcript)) # [END speech_transcribe_auto_punctuation] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('path', help='File to stream to the API') + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("path", help="File to stream to the API") args = parser.parse_args() diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_auto_punctuation_test.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_auto_punctuation_test.py index e42018d47a6e..8e95eac68e34 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_auto_punctuation_test.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_auto_punctuation_test.py @@ -15,12 +15,13 @@ import transcribe_auto_punctuation -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") def test_transcribe_file_with_auto_punctuation(capsys): transcribe_auto_punctuation.transcribe_file_with_auto_punctuation( - 'resources/commercial_mono.wav') + "resources/commercial_mono.wav" + ) out, _ = capsys.readouterr() - assert 'First alternative of result ' in out + assert "First alternative of result " in out diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_context_classes.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_context_classes.py index af483928ce92..69f40fd9c4ce 100644 --- 
a/packages/google-cloud-python-speech/samples/snippets/transcribe_context_classes.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_context_classes.py @@ -18,30 +18,32 @@ def transcribe_context_classes(storage_uri): favor specific classes of words in the results.""" # [START speech_context_classes] from google.cloud import speech + client = speech.SpeechClient() # storage_uri = 'gs://YOUR_BUCKET_ID/path/to/your/file.wav' - audio = speech.types.RecognitionAudio(uri=storage_uri) + audio = speech.RecognitionAudio(uri=storage_uri) # SpeechContext: to configure your speech_context see: # https://cloud.google.com/speech-to-text/docs/reference/rpc/google.cloud.speech.v1#speechcontext # Full list of supported phrases (class tokens) here: # https://cloud.google.com/speech-to-text/docs/class-tokens - speech_context = speech.types.SpeechContext(phrases=['$TIME']) + speech_context = speech.SpeechContext(phrases=["$TIME"]) # RecognitionConfig: to configure your encoding and sample_rate_hertz, see: # https://cloud.google.com/speech-to-text/docs/reference/rpc/google.cloud.speech.v1#recognitionconfig - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=8000, - language_code='en-US', - speech_contexts=[speech_context]) + language_code="en-US", + speech_contexts=[speech_context], + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print('First alternative of result {}'.format(i)) - print('Transcript: {}'.format(alternative.transcript)) + print("-" * 20) + print("First alternative of result {}".format(i)) + print("Transcript: {}".format(alternative.transcript)) # [END speech_context_classes] diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_context_classes_test.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_context_classes_test.py index e0d10d6f5410..61642fb2a5ce 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_context_classes_test.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_context_classes_test.py @@ -16,7 +16,8 @@ def test_transcribe_context_classes(capsys): transcribe_context_classes.transcribe_context_classes( - 'gs://cloud-samples-data/speech/commercial_mono.wav') + "gs://cloud-samples-data/speech/commercial_mono.wav" + ) out, _ = capsys.readouterr() - assert 'First alternative of result ' in out + assert "First alternative of result " in out diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_enhanced_model.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_enhanced_model.py index 1b233c52696c..6b2862c7c55c 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_enhanced_model.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_enhanced_model.py @@ -34,35 +34,36 @@ def transcribe_file_with_enhanced_model(path): client = speech.SpeechClient() # path = 'resources/commercial_mono.wav' - with io.open(path, 'rb') as audio_file: + with io.open(path, "rb") as audio_file: content = audio_file.read() - audio = speech.types.RecognitionAudio(content=content) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, 
+ audio = speech.RecognitionAudio(content=content) + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=8000, - language_code='en-US', + language_code="en-US", # Enhanced models are only available to projects that # opt in for audio data collection. use_enhanced=True, # A model must be specified to use enhanced model. - model='phone_call') + model="phone_call", + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print('First alternative of result {}'.format(i)) - print('Transcript: {}'.format(alternative.transcript)) + print("-" * 20) + print("First alternative of result {}".format(i)) + print("Transcript: {}".format(alternative.transcript)) # [END speech_transcribe_enhanced_model] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('path', help='File to stream to the API') + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("path", help="File to stream to the API") args = parser.parse_args() diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_enhanced_model_test.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_enhanced_model_test.py index 6e5676cfb8ff..cf673111604a 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_enhanced_model_test.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_enhanced_model_test.py @@ -15,12 +15,13 @@ import transcribe_enhanced_model -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") def test_transcribe_file_with_enhanced_model(capsys): transcribe_enhanced_model.transcribe_file_with_enhanced_model( - 'resources/commercial_mono.wav') + "resources/commercial_mono.wav" + ) out, _ = capsys.readouterr() - assert 'Chrome' in out + assert "Chrome" in out diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_model_selection.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_model_selection.py index f81b9e72dd16..a25fc1d51472 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_model_selection.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_model_selection.py @@ -32,26 +32,30 @@ def transcribe_model_selection(speech_file, model): """Transcribe the given audio file synchronously with the selected model.""" from google.cloud import speech + client = speech.SpeechClient() - with open(speech_file, 'rb') as audio_file: + with open(speech_file, "rb") as audio_file: content = audio_file.read() - audio = speech.types.RecognitionAudio(content=content) + audio = speech.RecognitionAudio(content=content) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=16000, - language_code='en-US', - model=model) + language_code="en-US", + model=model, + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = 
result.alternatives[0] - print('-' * 20) - print('First alternative of result {}'.format(i)) - print(u'Transcript: {}'.format(alternative.transcript)) + print("-" * 20) + print("First alternative of result {}".format(i)) + print(u"Transcript: {}".format(alternative.transcript)) + + # [END speech_transcribe_model_selection] @@ -60,43 +64,50 @@ def transcribe_model_selection_gcs(gcs_uri, model): """Transcribe the given audio file asynchronously with the selected model.""" from google.cloud import speech + client = speech.SpeechClient() - audio = speech.types.RecognitionAudio(uri=gcs_uri) + audio = speech.RecognitionAudio(uri=gcs_uri) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=16000, - language_code='en-US', - model=model) + language_code="en-US", + model=model, + ) - operation = client.long_running_recognize(config, audio) + operation = client.long_running_recognize( + request={"config": config, "audio": audio} + ) - print('Waiting for operation to complete...') + print("Waiting for operation to complete...") response = operation.result(timeout=90) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print('First alternative of result {}'.format(i)) - print(u'Transcript: {}'.format(alternative.transcript)) + print("-" * 20) + print("First alternative of result {}".format(i)) + print(u"Transcript: {}".format(alternative.transcript)) + + # [END speech_transcribe_model_selection_gcs] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - 'path', help='File or GCS path for audio file to be recognized') + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("path", help="File or GCS path for audio file to be recognized") parser.add_argument( - '--model', help='The speech recognition model to use', - choices=['command_and_search', 'phone_call', 'video', 'default'], - default='default') + "--model", + help="The speech recognition model to use", + choices=["command_and_search", "phone_call", "video", "default"], + default="default", + ) args = parser.parse_args() - if args.path.startswith('gs://'): + if args.path.startswith("gs://"): transcribe_model_selection_gcs(args.path, args.model) else: transcribe_model_selection(args.path, args.model) diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_model_selection_test.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_model_selection_test.py index 07bd91a4a0ae..59d04fe0b99a 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_model_selection_test.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_model_selection_test.py @@ -16,20 +16,22 @@ import transcribe_model_selection -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") def test_transcribe_model_selection_file(capsys): transcribe_model_selection.transcribe_model_selection( - os.path.join(RESOURCES, 'Google_Gnome.wav'), 'video') + os.path.join(RESOURCES, "Google_Gnome.wav"), "video" + ) out, err = capsys.readouterr() - assert re.search(r'the weather outside is sunny', out, re.DOTALL | re.I) + assert re.search(r"the weather 
outside is sunny", out, re.DOTALL | re.I) def test_transcribe_model_selection_gcs(capsys): transcribe_model_selection.transcribe_model_selection_gcs( - 'gs://cloud-samples-tests/speech/Google_Gnome.wav', 'video') + "gs://cloud-samples-tests/speech/Google_Gnome.wav", "video" + ) out, err = capsys.readouterr() - assert re.search(r'the weather outside is sunny', out, re.DOTALL | re.I) + assert re.search(r"the weather outside is sunny", out, re.DOTALL | re.I) diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_multichannel.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_multichannel.py index e84da59ad7b3..c5b4d5de95c5 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_multichannel.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_multichannel.py @@ -30,28 +30,30 @@ def transcribe_file_with_multichannel(speech_file): multi channel.""" # [START speech_transcribe_multichannel] from google.cloud import speech + client = speech.SpeechClient() - with open(speech_file, 'rb') as audio_file: + with open(speech_file, "rb") as audio_file: content = audio_file.read() - audio = speech.types.RecognitionAudio(content=content) + audio = speech.RecognitionAudio(content=content) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=44100, - language_code='en-US', + language_code="en-US", audio_channel_count=2, - enable_separate_recognition_per_channel=True) + enable_separate_recognition_per_channel=True, + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print('First alternative of result {}'.format(i)) - print(u'Transcript: {}'.format(alternative.transcript)) - print(u'Channel Tag: {}'.format(result.channel_tag)) + print("-" * 20) + print("First alternative of result {}".format(i)) + print(u"Transcript: {}".format(alternative.transcript)) + print(u"Channel Tag: {}".format(result.channel_tag)) # [END speech_transcribe_multichannel] @@ -60,36 +62,37 @@ def transcribe_gcs_with_multichannel(gcs_uri): multi channel.""" # [START speech_transcribe_multichannel_gcs] from google.cloud import speech + client = speech.SpeechClient() - audio = speech.types.RecognitionAudio(uri=gcs_uri) + audio = speech.RecognitionAudio(uri=gcs_uri) - config = speech.types.RecognitionConfig( - encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=44100, - language_code='en-US', + language_code="en-US", audio_channel_count=2, - enable_separate_recognition_per_channel=True) + enable_separate_recognition_per_channel=True, + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for i, result in enumerate(response.results): alternative = result.alternatives[0] - print('-' * 20) - print('First alternative of result {}'.format(i)) - print(u'Transcript: {}'.format(alternative.transcript)) - print(u'Channel Tag: {}'.format(result.channel_tag)) + print("-" * 20) + print("First alternative of result {}".format(i)) + print(u"Transcript: {}".format(alternative.transcript)) + print(u"Channel Tag: {}".format(result.channel_tag)) # 
[END speech_transcribe_multichannel_gcs] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - 'path', help='File or GCS path for audio file to be recognized') + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("path", help="File or GCS path for audio file to be recognized") args = parser.parse_args() - if args.path.startswith('gs://'): + if args.path.startswith("gs://"): transcribe_gcs_with_multichannel(args.path) else: transcribe_file_with_multichannel(args.path) diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_multichannel_test.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_multichannel_test.py index de9558629994..54808d169e66 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_multichannel_test.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_multichannel_test.py @@ -15,22 +15,21 @@ from transcribe_multichannel import ( transcribe_file_with_multichannel, - transcribe_gcs_with_multichannel) + transcribe_gcs_with_multichannel, +) -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") def test_transcribe_multichannel_file(capsys): - transcribe_file_with_multichannel( - os.path.join(RESOURCES, 'multi.wav')) + transcribe_file_with_multichannel(os.path.join(RESOURCES, "multi.wav")) out, err = capsys.readouterr() - assert 'how are you doing' in out + assert "how are you doing" in out def test_transcribe_multichannel_gcs(capsys): - transcribe_gcs_with_multichannel( - 'gs://cloud-samples-data/speech/multi.wav') + transcribe_gcs_with_multichannel("gs://cloud-samples-data/speech/multi.wav") out, err = capsys.readouterr() - assert 'how are you doing' in out + assert "how are you doing" in out diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_onprem/transcribe_onprem.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_onprem/transcribe_onprem.py index 2c050a153f37..844ef1a0ae16 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_onprem/transcribe_onprem.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_onprem/transcribe_onprem.py @@ -26,7 +26,6 @@ def transcribe_onprem(local_file_path, api_endpoint): api_endpoint: Endpoint to call for speech recognition, e.g. 0.0.0.0:10000 """ from google.cloud import speech_v1p1beta1 - from google.cloud.speech_v1p1beta1 import enums import grpc import io @@ -35,8 +34,11 @@ def transcribe_onprem(local_file_path, api_endpoint): # Create a gRPC channel to your server channel = grpc.insecure_channel(target=api_endpoint) + transport = speech_v1p1beta1.services.speech.transports.SpeechGrpcTransport( + channel=channel + ) - client = speech_v1p1beta1.SpeechClient(channel=channel) + client = speech_v1p1beta1.SpeechClient(transport=transport) # The language of the supplied audio language_code = "en-US" @@ -46,7 +48,7 @@ def transcribe_onprem(local_file_path, api_endpoint): # Encoding of audio data sent. This sample sets this explicitly. # This field is optional for FLAC and WAV audio formats. 
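The on-prem sample above also captures the other constructor change in 2.x: a raw gRPC channel can no longer be passed as SpeechClient(channel=...); it has to be wrapped in a transport object first. A minimal sketch, with the endpoint address a placeholder taken from the sample's docstring:

    import grpc
    from google.cloud import speech_v1p1beta1

    channel = grpc.insecure_channel(target="0.0.0.0:10000")  # placeholder endpoint
    transport = speech_v1p1beta1.services.speech.transports.SpeechGrpcTransport(
        channel=channel
    )
    client = speech_v1p1beta1.SpeechClient(transport=transport)
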
- encoding = enums.RecognitionConfig.AudioEncoding.LINEAR16 + encoding = speech_v1p1beta1.RecognitionConfig.AudioEncoding.LINEAR16 config = { "encoding": encoding, "language_code": language_code, @@ -56,18 +58,19 @@ def transcribe_onprem(local_file_path, api_endpoint): content = f.read() audio = {"content": content} - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for result in response.results: # First alternative is the most probable result alternative = result.alternatives[0] print(f"Transcript: {alternative.transcript}") + + # [END speech_transcribe_onprem] if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( "--file_path", @@ -81,6 +84,4 @@ def transcribe_onprem(local_file_path, api_endpoint): ) args = parser.parse_args() - transcribe_onprem( - local_file_path=args.file_path, api_endpoint=args.api_endpoint - ) + transcribe_onprem(local_file_path=args.file_path, api_endpoint=args.api_endpoint) diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_streaming.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_streaming.py index de727c221592..d3dc96e5db98 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_streaming.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_streaming.py @@ -28,28 +28,29 @@ def transcribe_streaming(stream_file): """Streams transcription of the given audio file.""" import io from google.cloud import speech - from google.cloud.speech import enums - from google.cloud.speech import types + client = speech.SpeechClient() # [START speech_python_migration_streaming_request] - with io.open(stream_file, 'rb') as audio_file: + with io.open(stream_file, "rb") as audio_file: content = audio_file.read() # In practice, stream should be a generator yielding chunks of audio data. stream = [content] - requests = (types.StreamingRecognizeRequest(audio_content=chunk) - for chunk in stream) + requests = ( + speech.StreamingRecognizeRequest(audio_content=chunk) for chunk in stream + ) - config = types.RecognitionConfig( - encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16, + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=16000, - language_code='en-US') - streaming_config = types.StreamingRecognitionConfig(config=config) + language_code="en-US", + ) + streaming_config = speech.StreamingRecognitionConfig(config=config) # streaming_recognize returns a generator. # [START speech_python_migration_streaming_response] - responses = client.streaming_recognize(streaming_config, requests) + responses = client.streaming_recognize(config=streaming_config, requests=requests,) # [END speech_python_migration_streaming_request] for response in responses: @@ -57,21 +58,23 @@ def transcribe_streaming(stream_file): # is_final result. The other results will be for subsequent portions of # the audio. for result in response.results: - print('Finished: {}'.format(result.is_final)) - print('Stability: {}'.format(result.stability)) + print("Finished: {}".format(result.is_final)) + print("Stability: {}".format(result.stability)) alternatives = result.alternatives # The alternatives are ordered from most likely to least. 
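streaming_recognize() keeps a flattened signature in these samples: it is called with config= and requests= keywords rather than a single request mapping, where requests is any iterable of StreamingRecognizeRequest messages (a generator in practice). A compact sketch, assuming audio.raw holds LINEAR16 audio as in the samples:

    from google.cloud import speech

    client = speech.SpeechClient()
    with open("resources/audio.raw", "rb") as f:
        content = f.read()

    # In practice, feed a generator that yields successive chunks of audio.
    requests = (
        speech.StreamingRecognizeRequest(audio_content=chunk) for chunk in [content]
    )
    streaming_config = speech.StreamingRecognitionConfig(
        config=speech.RecognitionConfig(
            encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
            sample_rate_hertz=16000,
            language_code="en-US",
        )
    )
    for response in client.streaming_recognize(config=streaming_config, requests=requests):
        for result in response.results:
            print(result.alternatives[0].transcript)
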
for alternative in alternatives: - print('Confidence: {}'.format(alternative.confidence)) - print(u'Transcript: {}'.format(alternative.transcript)) + print("Confidence: {}".format(alternative.confidence)) + print(u"Transcript: {}".format(alternative.transcript)) # [END speech_python_migration_streaming_response] + + # [END speech_transcribe_streaming] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('stream', help='File to stream to the API') + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("stream", help="File to stream to the API") args = parser.parse_args() transcribe_streaming(args.stream) diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_streaming_test.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_streaming_test.py index 2b3ca8ee5c0b..6eadbe1ecf44 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_streaming_test.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_streaming_test.py @@ -16,12 +16,11 @@ import transcribe_streaming -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") def test_transcribe_streaming(capsys): - transcribe_streaming.transcribe_streaming( - os.path.join(RESOURCES, 'audio.raw')) + transcribe_streaming.transcribe_streaming(os.path.join(RESOURCES, "audio.raw")) out, err = capsys.readouterr() - assert re.search(r'how old is the Brooklyn Bridge', out, re.DOTALL | re.I) + assert re.search(r"how old is the Brooklyn Bridge", out, re.DOTALL | re.I) diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_test.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_test.py index d1e9f6338ea6..7aac4f865875 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_test.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_test.py @@ -16,19 +16,18 @@ import transcribe -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") def test_transcribe_file(capsys): - transcribe.transcribe_file(os.path.join(RESOURCES, 'audio.raw')) + transcribe.transcribe_file(os.path.join(RESOURCES, "audio.raw")) out, err = capsys.readouterr() - assert re.search(r'how old is the Brooklyn Bridge', out, re.DOTALL | re.I) + assert re.search(r"how old is the Brooklyn Bridge", out, re.DOTALL | re.I) def test_transcribe_gcs(capsys): - transcribe.transcribe_gcs( - 'gs://python-docs-samples-tests/speech/audio.flac') + transcribe.transcribe_gcs("gs://python-docs-samples-tests/speech/audio.flac") out, err = capsys.readouterr() - assert re.search(r'how old is the Brooklyn Bridge', out, re.DOTALL | re.I) + assert re.search(r"how old is the Brooklyn Bridge", out, re.DOTALL | re.I) diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_word_time_offsets.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_word_time_offsets.py index 43ddf38c9aae..b49f2ecbe8f7 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_word_time_offsets.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_word_time_offsets.py @@ -30,34 +30,33 @@ def transcribe_file_with_word_time_offsets(speech_file): """Transcribe the given audio file synchronously 
and output the word time offsets.""" from google.cloud import speech - from google.cloud.speech import enums - from google.cloud.speech import types + client = speech.SpeechClient() - with io.open(speech_file, 'rb') as audio_file: + with io.open(speech_file, "rb") as audio_file: content = audio_file.read() - audio = types.RecognitionAudio(content=content) - config = types.RecognitionConfig( - encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16, + audio = speech.RecognitionAudio(content=content) + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=16000, - language_code='en-US', - enable_word_time_offsets=True) + language_code="en-US", + enable_word_time_offsets=True, + ) - response = client.recognize(config, audio) + response = client.recognize(request={"config": config, "audio": audio}) for result in response.results: alternative = result.alternatives[0] - print(u'Transcript: {}'.format(alternative.transcript)) + print("Transcript: {}".format(alternative.transcript)) for word_info in alternative.words: word = word_info.word start_time = word_info.start_time end_time = word_info.end_time - print('Word: {}, start_time: {}, end_time: {}'.format( - word, - start_time.seconds + start_time.nanos * 1e-9, - end_time.seconds + end_time.nanos * 1e-9)) + print( + f"Word: {word}, start_time: {start_time.total_seconds()}, end_time: {end_time.total_seconds()}" + ) # [START speech_transcribe_async_word_time_offsets_gcs] @@ -65,46 +64,48 @@ def transcribe_gcs_with_word_time_offsets(gcs_uri): """Transcribe the given audio file asynchronously and output the word time offsets.""" from google.cloud import speech - from google.cloud.speech import enums - from google.cloud.speech import types + client = speech.SpeechClient() - audio = types.RecognitionAudio(uri=gcs_uri) - config = types.RecognitionConfig( - encoding=enums.RecognitionConfig.AudioEncoding.FLAC, + audio = speech.RecognitionAudio(uri=gcs_uri) + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.FLAC, sample_rate_hertz=16000, - language_code='en-US', - enable_word_time_offsets=True) + language_code="en-US", + enable_word_time_offsets=True, + ) - operation = client.long_running_recognize(config, audio) + operation = client.long_running_recognize( + request={"config": config, "audio": audio} + ) - print('Waiting for operation to complete...') + print("Waiting for operation to complete...") result = operation.result(timeout=90) for result in result.results: alternative = result.alternatives[0] - print(u'Transcript: {}'.format(alternative.transcript)) - print('Confidence: {}'.format(alternative.confidence)) + print("Transcript: {}".format(alternative.transcript)) + print("Confidence: {}".format(alternative.confidence)) for word_info in alternative.words: word = word_info.word start_time = word_info.start_time end_time = word_info.end_time - print('Word: {}, start_time: {}, end_time: {}'.format( - word, - start_time.seconds + start_time.nanos * 1e-9, - end_time.seconds + end_time.nanos * 1e-9)) + print( + f"Word: {word}, start_time: {start_time.total_seconds()}, end_time: {end_time.total_seconds()}" + ) + + # [END speech_transcribe_async_word_time_offsets_gcs] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - 'path', help='File or GCS path for audio file to be recognized') + description=__doc__, 
formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("path", help="File or GCS path for audio file to be recognized") args = parser.parse_args() - if args.path.startswith('gs://'): + if args.path.startswith("gs://"): transcribe_gcs_with_word_time_offsets(args.path) else: transcribe_file_with_word_time_offsets(args.path) diff --git a/packages/google-cloud-python-speech/samples/snippets/transcribe_word_time_offsets_test.py b/packages/google-cloud-python-speech/samples/snippets/transcribe_word_time_offsets_test.py index e894385f1e62..185209494529 100644 --- a/packages/google-cloud-python-speech/samples/snippets/transcribe_word_time_offsets_test.py +++ b/packages/google-cloud-python-speech/samples/snippets/transcribe_word_time_offsets_test.py @@ -16,16 +16,17 @@ import transcribe_word_time_offsets -RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") def test_transcribe_file_with_word_time_offsets(capsys): transcribe_word_time_offsets.transcribe_file_with_word_time_offsets( - os.path.join(RESOURCES, 'audio.raw')) + os.path.join(RESOURCES, "audio.raw") + ) out, _ = capsys.readouterr() print(out) - match = re.search(r'Bridge, start_time: ([0-9.]+)', out, re.DOTALL | re.I) + match = re.search(r"Bridge, start_time: ([0-9.]+)", out, re.DOTALL | re.I) time = float(match.group(1)) assert time > 0 @@ -33,11 +34,12 @@ def test_transcribe_file_with_word_time_offsets(capsys): def test_transcribe_gcs_with_word_time_offsets(capsys): transcribe_word_time_offsets.transcribe_gcs_with_word_time_offsets( - 'gs://python-docs-samples-tests/speech/audio.flac') + "gs://python-docs-samples-tests/speech/audio.flac" + ) out, _ = capsys.readouterr() print(out) - match = re.search(r'Bridge, start_time: ([0-9.]+)', out, re.DOTALL | re.I) + match = re.search(r"Bridge, start_time: ([0-9.]+)", out, re.DOTALL | re.I) time = float(match.group(1)) assert time > 0 diff --git a/packages/google-cloud-python-speech/scripts/decrypt-secrets.sh b/packages/google-cloud-python-speech/scripts/decrypt-secrets.sh index ff599eb2af25..21f6d2a26d90 100755 --- a/packages/google-cloud-python-speech/scripts/decrypt-secrets.sh +++ b/packages/google-cloud-python-speech/scripts/decrypt-secrets.sh @@ -20,14 +20,27 @@ ROOT=$( dirname "$DIR" ) # Work from the project root. cd $ROOT +# Prevent it from overriding files. +# We recommend that sample authors use their own service account files and cloud project. +# In that case, they are supposed to prepare these files by themselves. +if [[ -f "testing/test-env.sh" ]] || \ + [[ -f "testing/service-account.json" ]] || \ + [[ -f "testing/client-secrets.json" ]]; then + echo "One or more target files exist, aborting." + exit 1 +fi + # Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. 
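Looking back at the transcribe_word_time_offsets hunks above: word timing fields now behave like datetime.timedelta values, so the old seconds-plus-nanos arithmetic collapses into one accessor. A one-line sketch, assuming word_info comes from alternative.words:

    # 1.x: word_info.start_time.seconds + word_info.start_time.nanos * 1e-9
    start = word_info.start_time.total_seconds()  # 2.x
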
PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + --project="${PROJECT_ID}" \ > testing/test-env.sh gcloud secrets versions access latest \ --secret="python-docs-samples-service-account" \ + --project="${PROJECT_ID}" \ > testing/service-account.json gcloud secrets versions access latest \ --secret="python-docs-samples-client-secrets" \ - > testing/client-secrets.json \ No newline at end of file + --project="${PROJECT_ID}" \ + > testing/client-secrets.json diff --git a/packages/google-cloud-python-speech/scripts/fixup_speech_v1_keywords.py b/packages/google-cloud-python-speech/scripts/fixup_speech_v1_keywords.py new file mode 100644 index 000000000000..9d4c6169f23e --- /dev/null +++ b/packages/google-cloud-python-speech/scripts/fixup_speech_v1_keywords.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class speechCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'long_running_recognize': ('config', 'audio', ), + 'recognize': ('config', 'audio', ), + 'streaming_recognize': ('streaming_config', 'audio_content', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
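+ # For example, a hypothetical call client.recognize(config, audio, timeout=30)
+ # is rewritten to client.recognize(request={'config': config, 'audio': audio}, timeout=30);
+ # 'timeout' is listed in CTRL_PARAMS, so it survives as a separate keyword argument.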
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=speechCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the speech client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-cloud-python-speech/scripts/fixup_speech_v1p1beta1_keywords.py b/packages/google-cloud-python-speech/scripts/fixup_speech_v1p1beta1_keywords.py new file mode 100644 index 000000000000..9d4c6169f23e --- /dev/null +++ b/packages/google-cloud-python-speech/scripts/fixup_speech_v1p1beta1_keywords.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class speechCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'long_running_recognize': ('config', 'audio', ), + 'recognize': ('config', 'audio', ), + 'streaming_recognize': ('streaming_config', 'audio_content', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=speechCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. 
+ with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the speech client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-cloud-python-speech/setup.py b/packages/google-cloud-python-speech/setup.py index 49d6395d366d..bde077684bdc 100644 --- a/packages/google-cloud-python-speech/setup.py +++ b/packages/google-cloud-python-speech/setup.py @@ -28,7 +28,11 @@ # 'Development Status :: 4 - Beta' # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" -dependencies = ["google-api-core[grpc] >= 1.14.0, < 2.0.0dev"] +dependencies = [ + "google-api-core[grpc] >= 1.22.2, < 2.0.0dev", + "libcst >= 0.2.5", + "proto-plus >= 1.4.0", +] extras = {} @@ -43,7 +47,9 @@ # Only include packages under the 'google' namespace. Do not include tests, # benchmarks, etc. packages = [ - package for package in setuptools.find_packages() if package.startswith("google") + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") ] # Determine which namespaces are needed. 
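The two keyword-fixup scripts above are identical apart from the filename, and they are shipped to users via the scripts= entry added to setup.py below. Besides the CLI, the core entry point fix_files() can be called directly; a usage sketch, with the directory names and the import path invented for illustration (the script would need to be importable, e.g. on sys.path):

    import pathlib
    from fixup_speech_v1_keywords import fix_files  # hypothetical import path

    in_dir = pathlib.Path("samples/snippets")         # must exist
    out_dir = pathlib.Path("samples/snippets_fixed")  # must exist and be empty
    fix_files(in_dir, out_dir)  # copies each .py file, un-flattening positional API calls
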
@@ -66,12 +72,10 @@ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Operating System :: OS Independent", "Topic :: Internet", ], @@ -80,7 +84,11 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", + python_requires=">=3.6", + scripts=[ + "scripts/fixup_speech_v1_keywords.py", + "scripts/fixup_speech_v1p1beta1_keywords.py", + ], include_package_data=True, zip_safe=False, ) diff --git a/packages/google-cloud-python-speech/synth.metadata b/packages/google-cloud-python-speech/synth.metadata index 590dab316585..c8576a2a2d35 100644 --- a/packages/google-cloud-python-speech/synth.metadata +++ b/packages/google-cloud-python-speech/synth.metadata @@ -3,30 +3,30 @@ { "git": { "name": ".", - "remote": "git@github.com:googleapis/python-speech.git", - "sha": "97b9e2481b2ed0817cd34238f240a013fb5dd6a5" + "remote": "git@github.com:googleapis/python-speech", + "sha": "64076bb484d3cf716f0a663f50956f147190e7c8" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "2db5725bf898b544a0cf951e1694d3b0fce5eda3", - "internalRef": "329384854" + "sha": "3dbeac0d54125b123c8dfd39c774b37473c36944", + "internalRef": "333159182" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "d91dd8aac77f7a9c5506c238038a26fa4f9e361e" + "sha": "f3c04883d6c43261ff13db1f52d03a283be06871" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "d91dd8aac77f7a9c5506c238038a26fa4f9e361e" + "sha": "f3c04883d6c43261ff13db1f52d03a283be06871" } } ], diff --git a/packages/google-cloud-python-speech/synth.py b/packages/google-cloud-python-speech/synth.py index 13791dae90dc..55e578534f46 100644 --- a/packages/google-cloud-python-speech/synth.py +++ b/packages/google-cloud-python-speech/synth.py @@ -36,46 +36,42 @@ # Don't move over __init__.py, as we modify it to make the generated client # use helpers.py. - s.move(library / f"google/cloud/speech_{version}/types.py") - s.move(library / f"google/cloud/speech_{version}/gapic") - s.move(library / f"google/cloud/speech_{version}/proto") - s.move(library / f"tests/unit/gapic/{version}") - s.move(library / f"docs/gapic/{version}") - s.move(library / f"samples") + s.move(library, excludes=["setup.py", "docs/index.rst", "README.rst"]) -# Use the highest version library to generate documentation import alias. -s.move(library / "google/cloud/speech.py") +# Add the manually written SpeechHelpers to v1 and v1p1beta1 +# See google/cloud/speech_v1/helpers.py for details +count = s.replace( +["google/cloud/speech_v1/__init__.py", "google/cloud/speech_v1p1beta1/__init__.py"], +"""__all__ = \(""", +"""from google.cloud.speech_v1.helpers import SpeechHelpers +class SpeechClient(SpeechHelpers, SpeechClient): + __doc__ = SpeechClient.__doc__ -# Fix tests to use the direct gapic client instead of the wrapped helper -# client. 
-s.replace( - "tests/unit/**/test*client*.py", - r"from google\.cloud import speech_(.+?)$", - r"from google.cloud.speech_\1.gapic import speech_client as speech_\1", -) - +__all__ = ( +""", + ) -# Fix bad docstring -s.replace( - "google/**/resource_pb2.py", - """``\\\\ ``e\.g\.``\\\\ - \$MONTH\\\\ ``\.""", - """``\ ``e.g.``\$MONTH\ ``.""" +# Import from speech_v1 to get the client with SpeechHelpers +count = s.replace( +"google/cloud/speech/__init__.py", +"""from google\.cloud\.speech_v1\.services\.speech\.client import SpeechClient""", +"""from google.cloud.speech_v1 import SpeechClient""" ) -s.replace( - "google/**/resource_pb2.py", - """\(e\.g\. ``\\\\ \{my- - months\}`\)\.""", - """(e.g. ``\ {my-months}``).""" -) # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- -templated_files = common.py_library(cov_level=87, samples=True) -s.move(templated_files) +templated_files = common.py_library( + samples=True, # set to True only if there are samples + microgenerator=True, + cov_level=99, +) +s.move( + templated_files, excludes=[".coveragerc"] +) # microgenerator has a good .coveragerc file + # ---------------------------------------------------------------------------- # Samples templates @@ -83,9 +79,4 @@ python.py_samples(skip_readmes=True) - -# TODO(busunkim): Use latest sphinx after microgenerator transition -s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"') - - s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/packages/google-cloud-python-speech/tests/system/gapic/v1/test_system_speech_v1.py b/packages/google-cloud-python-speech/tests/system/gapic/v1/test_system_speech_v1.py index 1c30e3fac6b9..b39d333cf510 100644 --- a/packages/google-cloud-python-speech/tests/system/gapic/v1/test_system_speech_v1.py +++ b/packages/google-cloud-python-speech/tests/system/gapic/v1/test_system_speech_v1.py @@ -30,7 +30,7 @@ def test_recognize(self): client = speech_v1.SpeechClient() config = { - "encoding": speech_v1.enums.RecognitionConfig.AudioEncoding.FLAC, + "encoding": speech_v1.RecognitionConfig.AudioEncoding.FLAC, "language_code": "en-US", "sample_rate_hertz": 16000, } @@ -38,7 +38,7 @@ def test_recognize(self): uri = "gs://{}/speech/brooklyn.flac".format(BUCKET) audio = {"uri": uri} - response = client.recognize(config, audio) + response = client.recognize(config=config, audio=audio) assert response.results[0].alternatives[0].transcript is not None @@ -51,8 +51,8 @@ def test_long_running_recognize(self): client = speech_v1.SpeechClient() - config = speech_v1.types.RecognitionConfig( - encoding=speech_v1.enums.RecognitionConfig.AudioEncoding.FLAC, + config = speech_v1.RecognitionConfig( + encoding=speech_v1.RecognitionConfig.AudioEncoding.FLAC, language_code="en-US", sample_rate_hertz=16000, ) @@ -60,7 +60,7 @@ def test_long_running_recognize(self): uri = "gs://{}/speech/brooklyn.flac".format(BUCKET) audio = {"uri": uri} - response = client.long_running_recognize(config, audio) + response = client.long_running_recognize(config=config, audio=audio) assert response.result() is not None @@ -73,21 +73,21 @@ def test_streaming_recognize(self): client = speech_v1.SpeechClient() - config = speech_v1.types.RecognitionConfig( - encoding=speech_v1.enums.RecognitionConfig.AudioEncoding.FLAC, + config = speech_v1.RecognitionConfig( + encoding=speech_v1.RecognitionConfig.AudioEncoding.FLAC, language_code="en-US", sample_rate_hertz=16000, ) - 
streamingConfig = speech_v1.types.StreamingRecognitionConfig(config=config) + streamingConfig = speech_v1.StreamingRecognitionConfig(config=config) uri = "https://storage.googleapis.com/{}/speech/brooklyn.flac".format(BUCKET) streaming_requests = [ - speech_v1.types.StreamingRecognizeRequest( - audio_content=requests.get(uri).content - ) + speech_v1.StreamingRecognizeRequest(audio_content=requests.get(uri).content) ] - responses = client.streaming_recognize(streamingConfig, streaming_requests) + responses = client.streaming_recognize( + config=streamingConfig, requests=streaming_requests + ) for response in responses: for result in response.results: diff --git a/packages/google-cloud-python-speech/tests/system/gapic/v1p1beta1/test_system_speech_v1p1beta1.py b/packages/google-cloud-python-speech/tests/system/gapic/v1p1beta1/test_system_speech_v1p1beta1.py index 36514089d364..13758df30ec4 100644 --- a/packages/google-cloud-python-speech/tests/system/gapic/v1p1beta1/test_system_speech_v1p1beta1.py +++ b/packages/google-cloud-python-speech/tests/system/gapic/v1p1beta1/test_system_speech_v1p1beta1.py @@ -30,7 +30,7 @@ def test_recognize(self): client = speech_v1p1beta1.SpeechClient() config = { - "encoding": speech_v1p1beta1.enums.RecognitionConfig.AudioEncoding.FLAC, + "encoding": speech_v1p1beta1.RecognitionConfig.AudioEncoding.FLAC, "language_code": "en-US", "sample_rate_hertz": 16000, } @@ -38,7 +38,7 @@ def test_recognize(self): uri = "gs://{}/speech/brooklyn.flac".format(BUCKET) audio = {"uri": uri} - response = client.recognize(config, audio) + response = client.recognize(config=config, audio=audio) assert response.results[0].alternatives[0].transcript is not None @@ -52,7 +52,7 @@ def test_long_running_recognize(self): client = speech_v1p1beta1.SpeechClient() config = speech_v1p1beta1.types.RecognitionConfig( - encoding=speech_v1p1beta1.enums.RecognitionConfig.AudioEncoding.FLAC, + encoding=speech_v1p1beta1.RecognitionConfig.AudioEncoding.FLAC, language_code="en-US", sample_rate_hertz=16000, ) @@ -60,7 +60,7 @@ def test_long_running_recognize(self): uri = "gs://{}/speech/brooklyn.flac".format(BUCKET) audio = {"uri": uri} - response = client.long_running_recognize(config, audio) + response = client.long_running_recognize(config=config, audio=audio) assert response.result() is not None @@ -73,23 +73,23 @@ def test_streaming_recognize(self): client = speech_v1p1beta1.SpeechClient() - config = speech_v1p1beta1.types.RecognitionConfig( - encoding=speech_v1p1beta1.enums.RecognitionConfig.AudioEncoding.FLAC, + config = speech_v1p1beta1.RecognitionConfig( + encoding=speech_v1p1beta1.RecognitionConfig.AudioEncoding.FLAC, language_code="en-US", sample_rate_hertz=16000, ) - streamingConfig = speech_v1p1beta1.types.StreamingRecognitionConfig( - config=config - ) + streamingConfig = speech_v1p1beta1.StreamingRecognitionConfig(config=config) uri = "https://storage.googleapis.com/{}/speech/brooklyn.flac".format(BUCKET) streaming_requests = [ - speech_v1p1beta1.types.StreamingRecognizeRequest( + speech_v1p1beta1.StreamingRecognizeRequest( audio_content=requests.get(uri).content ) ] - responses = client.streaming_recognize(streamingConfig, streaming_requests) + responses = client.streaming_recognize( + config=streamingConfig, requests=streaming_requests + ) for response in responses: for result in response.results: diff --git a/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1/__init__.py b/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1/__init__.py new file mode 100644 index 
000000000000..8b137891791f --- /dev/null +++ b/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1/__init__.py @@ -0,0 +1 @@ + diff --git a/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1/test_speech.py b/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1/test_speech.py new file mode 100644 index 000000000000..ccc2201bb9a6 --- /dev/null +++ b/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1/test_speech.py @@ -0,0 +1,1130 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.speech_v1.services.speech import SpeechAsyncClient +from google.cloud.speech_v1.services.speech import SpeechClient +from google.cloud.speech_v1.services.speech import transports +from google.cloud.speech_v1.types import cloud_speech +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.rpc import status_pb2 as status # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SpeechClient._get_default_mtls_endpoint(None) is None + assert SpeechClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ( + SpeechClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + ) + assert ( + SpeechClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + SpeechClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert SpeechClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [SpeechClient, SpeechAsyncClient]) +def test_speech_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "speech.googleapis.com:443" + + +def test_speech_client_get_transport_class(): + transport = SpeechClient.get_transport_class() + assert transport == transports.SpeechGrpcTransport + + transport = SpeechClient.get_transport_class("grpc") + assert transport == transports.SpeechGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (SpeechClient, transports.SpeechGrpcTransport, "grpc"), + (SpeechAsyncClient, transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +@mock.patch.object( + SpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechClient) +) +@mock.patch.object( + SpeechAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechAsyncClient) +) +def test_speech_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(SpeechClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SpeechClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (SpeechClient, transports.SpeechGrpcTransport, "grpc", "true"), + ( + SpeechAsyncClient, + transports.SpeechGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (SpeechClient, transports.SpeechGrpcTransport, "grpc", "false"), + ( + SpeechAsyncClient, + transports.SpeechGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + SpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechClient) +) +@mock.patch.object( + SpeechAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechAsyncClient) +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_speech_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (SpeechClient, transports.SpeechGrpcTransport, "grpc"), + (SpeechAsyncClient, transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_speech_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (SpeechClient, transports.SpeechGrpcTransport, "grpc"), + (SpeechAsyncClient, transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_speech_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_speech_client_client_options_from_dict(): + with mock.patch( + "google.cloud.speech_v1.services.speech.transports.SpeechGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = SpeechClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_recognize(transport: str = "grpc", request_type=cloud_speech.RecognizeRequest): + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.recognize), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cloud_speech.RecognizeResponse() + + response = client.recognize(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == cloud_speech.RecognizeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cloud_speech.RecognizeResponse) + + +def test_recognize_from_dict(): + test_recognize(request_type=dict) + + +@pytest.mark.asyncio +async def test_recognize_async(transport: str = "grpc_asyncio"): + client = SpeechAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cloud_speech.RecognizeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cloud_speech.RecognizeResponse() + ) + + response = await client.recognize(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cloud_speech.RecognizeResponse) + + +def test_recognize_flattened(): + client = SpeechClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.recognize), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cloud_speech.RecognizeResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.recognize( + config=cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ), + audio=cloud_speech.RecognitionAudio(content=b"content_blob"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].config == cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ) + + assert args[0].audio == cloud_speech.RecognitionAudio(content=b"content_blob") + + +def test_recognize_flattened_error(): + client = SpeechClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.recognize( + cloud_speech.RecognizeRequest(), + config=cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ), + audio=cloud_speech.RecognitionAudio(content=b"content_blob"), + ) + + +@pytest.mark.asyncio +async def test_recognize_flattened_async(): + client = SpeechAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = cloud_speech.RecognizeResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cloud_speech.RecognizeResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.recognize( + config=cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ), + audio=cloud_speech.RecognitionAudio(content=b"content_blob"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].config == cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ) + + assert args[0].audio == cloud_speech.RecognitionAudio(content=b"content_blob") + + +@pytest.mark.asyncio +async def test_recognize_flattened_error_async(): + client = SpeechAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.recognize( + cloud_speech.RecognizeRequest(), + config=cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ), + audio=cloud_speech.RecognitionAudio(content=b"content_blob"), + ) + + +def test_long_running_recognize( + transport: str = "grpc", request_type=cloud_speech.LongRunningRecognizeRequest +): + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.long_running_recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.long_running_recognize(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == cloud_speech.LongRunningRecognizeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_long_running_recognize_from_dict(): + test_long_running_recognize(request_type=dict) + + +@pytest.mark.asyncio +async def test_long_running_recognize_async(transport: str = "grpc_asyncio"): + client = SpeechAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cloud_speech.LongRunningRecognizeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.long_running_recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.long_running_recognize(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_long_running_recognize_flattened(): + client = SpeechClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.long_running_recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.long_running_recognize( + config=cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ), + audio=cloud_speech.RecognitionAudio(content=b"content_blob"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].config == cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ) + + assert args[0].audio == cloud_speech.RecognitionAudio(content=b"content_blob") + + +def test_long_running_recognize_flattened_error(): + client = SpeechClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.long_running_recognize( + cloud_speech.LongRunningRecognizeRequest(), + config=cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ), + audio=cloud_speech.RecognitionAudio(content=b"content_blob"), + ) + + +@pytest.mark.asyncio +async def test_long_running_recognize_flattened_async(): + client = SpeechAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.long_running_recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.long_running_recognize( + config=cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ), + audio=cloud_speech.RecognitionAudio(content=b"content_blob"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].config == cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ) + + assert args[0].audio == cloud_speech.RecognitionAudio(content=b"content_blob") + + +@pytest.mark.asyncio +async def test_long_running_recognize_flattened_error_async(): + client = SpeechAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.long_running_recognize( + cloud_speech.LongRunningRecognizeRequest(), + config=cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ), + audio=cloud_speech.RecognitionAudio(content=b"content_blob"), + ) + + +def test_streaming_recognize( + transport: str = "grpc", request_type=cloud_speech.StreamingRecognizeRequest +): + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + requests = [request] + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.streaming_recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([cloud_speech.StreamingRecognizeResponse()]) + + response = client.streaming_recognize(iter(requests)) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert next(args[0]) == request + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, cloud_speech.StreamingRecognizeResponse) + + +def test_streaming_recognize_from_dict(): + test_streaming_recognize(request_type=dict) + + +@pytest.mark.asyncio +async def test_streaming_recognize_async(transport: str = "grpc_asyncio"): + client = SpeechAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cloud_speech.StreamingRecognizeRequest() + + requests = [request] + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.streaming_recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[cloud_speech.StreamingRecognizeResponse()] + ) + + response = await client.streaming_recognize(iter(requests)) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert next(args[0]) == request + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, cloud_speech.StreamingRecognizeResponse) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.SpeechGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.SpeechGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpeechClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.SpeechGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpeechClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.SpeechGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = SpeechClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.SpeechGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.SpeechGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [transports.SpeechGrpcTransport, transports.SpeechGrpcAsyncIOTransport], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = SpeechClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.SpeechGrpcTransport,) + + +def test_speech_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.SpeechTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_speech_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.speech_v1.services.speech.transports.SpeechTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.SpeechTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "recognize", + "long_running_recognize", + "streaming_recognize", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_speech_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.speech_v1.services.speech.transports.SpeechTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.SpeechTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_speech_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.speech_v1.services.speech.transports.SpeechTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.SpeechTransport() + adc.assert_called_once() + + +def test_speech_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + SpeechClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_speech_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.SpeechGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_speech_host_no_port(): + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="speech.googleapis.com" + ), + ) + assert client._transport._host == "speech.googleapis.com:443" + + +def test_speech_host_with_port(): + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="speech.googleapis.com:8000" + ), + ) + assert client._transport._host == "speech.googleapis.com:8000" + + +def test_speech_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.SpeechGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +def test_speech_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.SpeechGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +@pytest.mark.parametrize( + "transport_class", + [transports.SpeechGrpcTransport, transports.SpeechGrpcAsyncIOTransport], +) +def test_speech_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "transport_class", + [transports.SpeechGrpcTransport, transports.SpeechGrpcAsyncIOTransport], +) +def test_speech_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = 
mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_speech_grpc_lro_client(): + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_speech_grpc_lro_async_client(): + client = SpeechAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client._client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.SpeechTransport, "_prep_wrapped_messages" + ) as prep: + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.SpeechTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = SpeechClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1p1beta1/__init__.py b/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1p1beta1/__init__.py new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1p1beta1/__init__.py @@ -0,0 +1 @@ + diff --git a/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1p1beta1/test_speech.py b/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1p1beta1/test_speech.py new file mode 100644 index 000000000000..584ad0ac7b15 --- /dev/null +++ b/packages/google-cloud-python-speech/tests/unit/gapic/speech_v1p1beta1/test_speech.py @@ -0,0 +1,1181 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.speech_v1p1beta1.services.speech import SpeechAsyncClient +from google.cloud.speech_v1p1beta1.services.speech import SpeechClient +from google.cloud.speech_v1p1beta1.services.speech import transports +from google.cloud.speech_v1p1beta1.types import cloud_speech +from google.cloud.speech_v1p1beta1.types import resource +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.rpc import status_pb2 as status # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SpeechClient._get_default_mtls_endpoint(None) is None + assert SpeechClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ( + SpeechClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + ) + assert ( + SpeechClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + SpeechClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert SpeechClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [SpeechClient, SpeechAsyncClient]) +def test_speech_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "speech.googleapis.com:443" + + +def test_speech_client_get_transport_class(): + transport = SpeechClient.get_transport_class() + assert transport == transports.SpeechGrpcTransport + + transport = SpeechClient.get_transport_class("grpc") + assert transport == transports.SpeechGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (SpeechClient, transports.SpeechGrpcTransport, "grpc"), + (SpeechAsyncClient, transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) 
+@mock.patch.object( + SpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechClient) +) +@mock.patch.object( + SpeechAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechAsyncClient) +) +def test_speech_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(SpeechClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SpeechClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (SpeechClient, transports.SpeechGrpcTransport, "grpc", "true"), + ( + SpeechAsyncClient, + transports.SpeechGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (SpeechClient, transports.SpeechGrpcTransport, "grpc", "false"), + ( + SpeechAsyncClient, + transports.SpeechGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + SpeechClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechClient) +) +@mock.patch.object( + SpeechAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpeechAsyncClient) +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_speech_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (SpeechClient, transports.SpeechGrpcTransport, "grpc"), + (SpeechAsyncClient, transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_speech_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (SpeechClient, transports.SpeechGrpcTransport, "grpc"), + (SpeechAsyncClient, transports.SpeechGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_speech_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_speech_client_client_options_from_dict(): + with mock.patch( + "google.cloud.speech_v1p1beta1.services.speech.transports.SpeechGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = SpeechClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_recognize(transport: str = "grpc", request_type=cloud_speech.RecognizeRequest): + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.recognize), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cloud_speech.RecognizeResponse() + + response = client.recognize(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == cloud_speech.RecognizeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cloud_speech.RecognizeResponse) + + +def test_recognize_from_dict(): + test_recognize(request_type=dict) + + +@pytest.mark.asyncio +async def test_recognize_async(transport: str = "grpc_asyncio"): + client = SpeechAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cloud_speech.RecognizeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cloud_speech.RecognizeResponse() + ) + + response = await client.recognize(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, cloud_speech.RecognizeResponse) + + +def test_recognize_flattened(): + client = SpeechClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.recognize), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = cloud_speech.RecognizeResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.recognize(
+            config=cloud_speech.RecognitionConfig(
+                encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16
+            ),
+            audio=cloud_speech.RecognitionAudio(content=b"content_blob"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].config == cloud_speech.RecognitionConfig(
+            encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16
+        )
+
+        assert args[0].audio == cloud_speech.RecognitionAudio(content=b"content_blob")
+
+
+def test_recognize_flattened_error():
+    client = SpeechClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.recognize(
+            cloud_speech.RecognizeRequest(),
+            config=cloud_speech.RecognitionConfig(
+                encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16
+            ),
+            audio=cloud_speech.RecognitionAudio(content=b"content_blob"),
+        )
+
+
+@pytest.mark.asyncio
+async def test_recognize_flattened_async():
+    client = SpeechAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.recognize), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            cloud_speech.RecognizeResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.recognize(
+            config=cloud_speech.RecognitionConfig(
+                encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16
+            ),
+            audio=cloud_speech.RecognitionAudio(content=b"content_blob"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].config == cloud_speech.RecognitionConfig(
+            encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16
+        )
+
+        assert args[0].audio == cloud_speech.RecognitionAudio(content=b"content_blob")
+
+
+@pytest.mark.asyncio
+async def test_recognize_flattened_error_async():
+    client = SpeechAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.recognize(
+            cloud_speech.RecognizeRequest(),
+            config=cloud_speech.RecognitionConfig(
+                encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16
+            ),
+            audio=cloud_speech.RecognitionAudio(content=b"content_blob"),
+        )
+
+
+def test_long_running_recognize(
+    transport: str = "grpc", request_type=cloud_speech.LongRunningRecognizeRequest
+):
+    client = SpeechClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
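+    # long_running_recognize is an LRO method: the stub returns a raw
+    # operations_pb2.Operation, which the client wraps in an api_core future.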
+ with mock.patch.object( + type(client._transport.long_running_recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.long_running_recognize(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == cloud_speech.LongRunningRecognizeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_long_running_recognize_from_dict(): + test_long_running_recognize(request_type=dict) + + +@pytest.mark.asyncio +async def test_long_running_recognize_async(transport: str = "grpc_asyncio"): + client = SpeechAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cloud_speech.LongRunningRecognizeRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.long_running_recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.long_running_recognize(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_long_running_recognize_flattened(): + client = SpeechClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.long_running_recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.long_running_recognize( + config=cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ), + audio=cloud_speech.RecognitionAudio(content=b"content_blob"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].config == cloud_speech.RecognitionConfig( + encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16 + ) + + assert args[0].audio == cloud_speech.RecognitionAudio(content=b"content_blob") + + +def test_long_running_recognize_flattened_error(): + client = SpeechClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.long_running_recognize(
+            cloud_speech.LongRunningRecognizeRequest(),
+            config=cloud_speech.RecognitionConfig(
+                encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16
+            ),
+            audio=cloud_speech.RecognitionAudio(content=b"content_blob"),
+        )
+
+
+@pytest.mark.asyncio
+async def test_long_running_recognize_flattened_async():
+    client = SpeechAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.long_running_recognize), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.long_running_recognize(
+            config=cloud_speech.RecognitionConfig(
+                encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16
+            ),
+            audio=cloud_speech.RecognitionAudio(content=b"content_blob"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].config == cloud_speech.RecognitionConfig(
+            encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16
+        )
+
+        assert args[0].audio == cloud_speech.RecognitionAudio(content=b"content_blob")
+
+
+@pytest.mark.asyncio
+async def test_long_running_recognize_flattened_error_async():
+    client = SpeechAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.long_running_recognize(
+            cloud_speech.LongRunningRecognizeRequest(),
+            config=cloud_speech.RecognitionConfig(
+                encoding=cloud_speech.RecognitionConfig.AudioEncoding.LINEAR16
+            ),
+            audio=cloud_speech.RecognitionAudio(content=b"content_blob"),
+        )
+
+
+def test_streaming_recognize(
+    transport: str = "grpc", request_type=cloud_speech.StreamingRecognizeRequest
+):
+    client = SpeechClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    requests = [request]
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.streaming_recognize), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iter([cloud_speech.StreamingRecognizeResponse()])
+
+        response = client.streaming_recognize(iter(requests))
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert next(args[0]) == request
+
+    # Establish that the response is the type that we expect.
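+    # streaming_recognize is a bidirectional streaming call, so the stub
+    # returns an iterator of responses rather than a single message.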
+ for message in response: + assert isinstance(message, cloud_speech.StreamingRecognizeResponse) + + +def test_streaming_recognize_from_dict(): + test_streaming_recognize(request_type=dict) + + +@pytest.mark.asyncio +async def test_streaming_recognize_async(transport: str = "grpc_asyncio"): + client = SpeechAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = cloud_speech.StreamingRecognizeRequest() + + requests = [request] + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.streaming_recognize), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[cloud_speech.StreamingRecognizeResponse()] + ) + + response = await client.streaming_recognize(iter(requests)) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert next(args[0]) == request + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, cloud_speech.StreamingRecognizeResponse) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.SpeechGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.SpeechGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpeechClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.SpeechGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpeechClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.SpeechGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = SpeechClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.SpeechGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.SpeechGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [transports.SpeechGrpcTransport, transports.SpeechGrpcAsyncIOTransport], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
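+    # google.auth.default() is patched so that the test never performs a real
+    # ADC lookup against the environment.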
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = SpeechClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.SpeechGrpcTransport,) + + +def test_speech_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.SpeechTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_speech_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.speech_v1p1beta1.services.speech.transports.SpeechTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.SpeechTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "recognize", + "long_running_recognize", + "streaming_recognize", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_speech_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.speech_v1p1beta1.services.speech.transports.SpeechTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.SpeechTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_speech_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.speech_v1p1beta1.services.speech.transports.SpeechTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.SpeechTransport() + adc.assert_called_once() + + +def test_speech_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + SpeechClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_speech_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
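+    # The quota_project_id given to the transport should be forwarded to the
+    # ADC lookup unchanged.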
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.SpeechGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_speech_host_no_port(): + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="speech.googleapis.com" + ), + ) + assert client._transport._host == "speech.googleapis.com:443" + + +def test_speech_host_with_port(): + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="speech.googleapis.com:8000" + ), + ) + assert client._transport._host == "speech.googleapis.com:8000" + + +def test_speech_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.SpeechGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +def test_speech_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.SpeechGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + + +@pytest.mark.parametrize( + "transport_class", + [transports.SpeechGrpcTransport, transports.SpeechGrpcAsyncIOTransport], +) +def test_speech_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "transport_class", + [transports.SpeechGrpcTransport, transports.SpeechGrpcAsyncIOTransport], +) +def test_speech_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = 
mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=("https://www.googleapis.com/auth/cloud-platform",),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_speech_grpc_lro_client():
+    client = SpeechClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc",
+    )
+    transport = client._transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_speech_grpc_lro_async_client():
+    client = SpeechAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    transport = client._client._transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_custom_class_path():
+    project = "squid"
+    location = "clam"
+    custom_class = "whelk"
+
+    expected = "projects/{project}/locations/{location}/customClasses/{custom_class}".format(
+        project=project, location=location, custom_class=custom_class,
+    )
+    actual = SpeechClient.custom_class_path(project, location, custom_class)
+    assert expected == actual
+
+
+def test_parse_custom_class_path():
+    expected = {
+        "project": "octopus",
+        "location": "oyster",
+        "custom_class": "nudibranch",
+    }
+    path = SpeechClient.custom_class_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = SpeechClient.parse_custom_class_path(path)
+    assert expected == actual
+
+
+def test_phrase_set_path():
+    project = "squid"
+    location = "clam"
+    phrase_set = "whelk"
+
+    expected = "projects/{project}/locations/{location}/phraseSets/{phrase_set}".format(
+        project=project, location=location, phrase_set=phrase_set,
+    )
+    actual = SpeechClient.phrase_set_path(project, location, phrase_set)
+    assert expected == actual
+
+
+def test_parse_phrase_set_path():
+    expected = {
+        "project": "octopus",
+        "location": "oyster",
+        "phrase_set": "nudibranch",
+    }
+    path = SpeechClient.phrase_set_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = SpeechClient.parse_phrase_set_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.SpeechTransport, "_prep_wrapped_messages" + ) as prep: + client = SpeechClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.SpeechTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = SpeechClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/packages/google-cloud-python-speech/tests/unit/gapic/v1/test_speech_client_v1.py b/packages/google-cloud-python-speech/tests/unit/gapic/v1/test_speech_client_v1.py deleted file mode 100644 index 29e0e0295a50..000000000000 --- a/packages/google-cloud-python-speech/tests/unit/gapic/v1/test_speech_client_v1.py +++ /dev/null @@ -1,244 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud.speech_v1.gapic import speech_client as speech_v1 -from google.cloud.speech_v1 import enums -from google.cloud.speech_v1.proto import cloud_speech_pb2 -from google.longrunning import operations_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def stream_stream( - self, method, request_serializer=None, response_deserializer=None - ): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestSpeechClient(object): - def test_recognize(self): - # Setup Expected Response - expected_response = {} - expected_response = cloud_speech_pb2.RecognizeResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1.SpeechClient() - - # Setup Request - encoding = enums.RecognitionConfig.AudioEncoding.FLAC - 
sample_rate_hertz = 44100 - language_code = "en-US" - config = { - "encoding": encoding, - "sample_rate_hertz": sample_rate_hertz, - "language_code": language_code, - } - uri = "gs://bucket_name/file_name.flac" - audio = {"uri": uri} - - response = client.recognize(config, audio) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_recognize_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1.SpeechClient() - - # Setup request - encoding = enums.RecognitionConfig.AudioEncoding.FLAC - sample_rate_hertz = 44100 - language_code = "en-US" - config = { - "encoding": encoding, - "sample_rate_hertz": sample_rate_hertz, - "language_code": language_code, - } - uri = "gs://bucket_name/file_name.flac" - audio = {"uri": uri} - - with pytest.raises(CustomException): - client.recognize(config, audio) - - def test_long_running_recognize(self): - # Setup Expected Response - expected_response = {} - expected_response = cloud_speech_pb2.LongRunningRecognizeResponse( - **expected_response - ) - operation = operations_pb2.Operation( - name="operations/test_long_running_recognize", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1.SpeechClient() - - # Setup Request - encoding = enums.RecognitionConfig.AudioEncoding.FLAC - sample_rate_hertz = 44100 - language_code = "en-US" - config = { - "encoding": encoding, - "sample_rate_hertz": sample_rate_hertz, - "language_code": language_code, - } - uri = "gs://bucket_name/file_name.flac" - audio = {"uri": uri} - - response = client.long_running_recognize(config, audio) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = cloud_speech_pb2.LongRunningRecognizeRequest( - config=config, audio=audio - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_long_running_recognize_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_long_running_recognize_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1.SpeechClient() - - # Setup Request - encoding = enums.RecognitionConfig.AudioEncoding.FLAC - sample_rate_hertz = 44100 - language_code = "en-US" - config = { - "encoding": encoding, - "sample_rate_hertz": sample_rate_hertz, - "language_code": language_code, - } - uri = "gs://bucket_name/file_name.flac" - audio = {"uri": uri} - - response = client.long_running_recognize(config, audio) - exception = response.exception() - assert exception.errors[0] == error - - def test_streaming_recognize(self): - # Setup Expected Response - expected_response = {} - expected_response = 
cloud_speech_pb2.StreamingRecognizeResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1.SpeechClient() - - # Setup Request - request = {} - request = cloud_speech_pb2.StreamingRecognizeRequest(**request) - requests = [request] - - response = client.streaming_recognize(requests) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - actual_requests = channel.requests[0][1] - assert len(actual_requests) == 1 - actual_request = list(actual_requests)[0] - assert request == actual_request - - def test_streaming_recognize_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1.SpeechClient() - - # Setup request - request = {} - - request = cloud_speech_pb2.StreamingRecognizeRequest(**request) - requests = [request] - - with pytest.raises(CustomException): - client.streaming_recognize(requests) diff --git a/packages/google-cloud-python-speech/tests/unit/gapic/v1p1beta1/test_speech_client_v1p1beta1.py b/packages/google-cloud-python-speech/tests/unit/gapic/v1p1beta1/test_speech_client_v1p1beta1.py deleted file mode 100644 index c4588b529eab..000000000000 --- a/packages/google-cloud-python-speech/tests/unit/gapic/v1p1beta1/test_speech_client_v1p1beta1.py +++ /dev/null @@ -1,244 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud.speech_v1p1beta1.gapic import speech_client as speech_v1p1beta1 -from google.cloud.speech_v1p1beta1 import enums -from google.cloud.speech_v1p1beta1.proto import cloud_speech_pb2 -from google.longrunning import operations_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def stream_stream( - self, method, request_serializer=None, response_deserializer=None - ): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestSpeechClient(object): - def test_recognize(self): - # Setup Expected Response - expected_response = {} - expected_response = cloud_speech_pb2.RecognizeResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1p1beta1.SpeechClient() - - # Setup Request - encoding = enums.RecognitionConfig.AudioEncoding.FLAC - sample_rate_hertz = 44100 - language_code = "en-US" - config = { - "encoding": encoding, - "sample_rate_hertz": sample_rate_hertz, - "language_code": language_code, - } - uri = "gs://bucket_name/file_name.flac" - audio = {"uri": uri} - - response = client.recognize(config, audio) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_recognize_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1p1beta1.SpeechClient() - - # Setup request - encoding = enums.RecognitionConfig.AudioEncoding.FLAC - sample_rate_hertz = 44100 - language_code = "en-US" - config = { - "encoding": encoding, - "sample_rate_hertz": sample_rate_hertz, - "language_code": language_code, - } - uri = "gs://bucket_name/file_name.flac" - audio = {"uri": uri} - - with pytest.raises(CustomException): - client.recognize(config, audio) - - def test_long_running_recognize(self): - # Setup Expected Response - expected_response = {} - expected_response = cloud_speech_pb2.LongRunningRecognizeResponse( - **expected_response - ) - operation = operations_pb2.Operation( - name="operations/test_long_running_recognize", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1p1beta1.SpeechClient() - - # Setup Request - encoding = enums.RecognitionConfig.AudioEncoding.FLAC - sample_rate_hertz = 44100 - language_code = "en-US" - config = { - "encoding": encoding, - "sample_rate_hertz": sample_rate_hertz, - "language_code": language_code, - } - uri = "gs://bucket_name/file_name.flac" - audio = {"uri": uri} - - response = client.long_running_recognize(config, audio) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = cloud_speech_pb2.LongRunningRecognizeRequest( - config=config, audio=audio - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_long_running_recognize_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_long_running_recognize_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1p1beta1.SpeechClient() - - # Setup Request - encoding = enums.RecognitionConfig.AudioEncoding.FLAC - sample_rate_hertz = 44100 - language_code = "en-US" - config = { - "encoding": encoding, - "sample_rate_hertz": sample_rate_hertz, - "language_code": language_code, - } - uri = "gs://bucket_name/file_name.flac" - audio = {"uri": uri} - - response = client.long_running_recognize(config, audio) - exception = response.exception() - assert exception.errors[0] == error - - def test_streaming_recognize(self): - # Setup Expected Response - expected_response = {} - expected_response = cloud_speech_pb2.StreamingRecognizeResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1p1beta1.SpeechClient() - - # Setup Request - request = {} - request = cloud_speech_pb2.StreamingRecognizeRequest(**request) - requests = [request] - - response = client.streaming_recognize(requests) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - actual_requests = channel.requests[0][1] - assert len(actual_requests) == 1 - actual_request = list(actual_requests)[0] - assert request == actual_request - - def test_streaming_recognize_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = speech_v1p1beta1.SpeechClient() - - # Setup request - request = {} - - request = cloud_speech_pb2.StreamingRecognizeRequest(**request) - requests = [request] - - with pytest.raises(CustomException): - client.streaming_recognize(requests) diff --git a/packages/google-cloud-python-speech/tests/unit/test_helpers.py b/packages/google-cloud-python-speech/tests/unit/test_helpers.py index 366b1821dd4c..d7fdfdd23fd9 100644 --- a/packages/google-cloud-python-speech/tests/unit/test_helpers.py +++ 
b/packages/google-cloud-python-speech/tests/unit/test_helpers.py @@ -33,7 +33,7 @@ def test_streaming_recognize(): config = types.StreamingRecognitionConfig() requests = [types.StreamingRecognizeRequest(audio_content=b"...")] super_patch = mock.patch( - "google.cloud.speech_v1.speech_client.SpeechClient." "streaming_recognize", + "google.cloud.speech_v1.services.speech.SpeechClient.streaming_recognize", autospec=True, ) @@ -43,10 +43,10 @@ def test_streaming_recognize(): # Assert that we called streaming recognize with an iterable # that evaluates to the correct format. _, args, kwargs = streaming_recognize.mock_calls[0] - api_requests = args[1] + api_requests = kwargs["requests"] assert isinstance(api_requests, GeneratorType) assert list(api_requests) == [ - types.StreamingRecognizeRequest(streaming_config=config), + {"streaming_config": config}, requests[0], ] assert "retry" in kwargs
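
For reference, a minimal usage sketch of the helper surface the updated test above exercises, assuming the microgenerated speech_v1 package introduced by this patch; the sample rate, language code, and audio bytes are illustrative placeholders:

    from google.cloud import speech_v1

    client = speech_v1.SpeechClient()

    # The helper accepts the streaming config separately and prepends it as the
    # first request on the wire (the {"streaming_config": config} dict asserted
    # in the test), followed by the audio requests.
    config = speech_v1.StreamingRecognitionConfig(
        config=speech_v1.RecognitionConfig(
            encoding=speech_v1.RecognitionConfig.AudioEncoding.LINEAR16,
            sample_rate_hertz=16000,
            language_code="en-US",
        )
    )
    requests = [speech_v1.StreamingRecognizeRequest(audio_content=b"...")]

    for response in client.streaming_recognize(config, requests):
        for result in response.results:
            print(result.alternatives[0].transcript)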