From 55446270bd62060e75287c1d7a0f0088c7629e95 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 29 Jun 2021 22:24:38 +0000 Subject: [PATCH 1/2] chore: use gapic-generator-python 0.50.3 fix: disable always_use_jwt_access Committer: @busunkim96 PiperOrigin-RevId: 382142900 Source-Link: https://github.com/googleapis/googleapis/commit/513440fda515f3c799c22a30e3906dcda325004e Source-Link: https://github.com/googleapis/googleapis-gen/commit/7b1e2c31233f79a704ec21ca410bf661d6bc68d0 --- owl-bot-staging/v1/.coveragerc | 17 + owl-bot-staging/v1/MANIFEST.in | 2 + owl-bot-staging/v1/README.rst | 49 + owl-bot-staging/v1/docs/conf.py | 376 +++ .../autoscaling_policy_service.rst | 10 + .../docs/dataproc_v1/cluster_controller.rst | 10 + .../v1/docs/dataproc_v1/job_controller.rst | 10 + .../v1/docs/dataproc_v1/services.rst | 9 + owl-bot-staging/v1/docs/dataproc_v1/types.rst | 7 + .../dataproc_v1/workflow_template_service.rst | 10 + owl-bot-staging/v1/docs/index.rst | 7 + .../v1/google/cloud/dataproc/__init__.py | 217 ++ .../v1/google/cloud/dataproc/py.typed | 2 + .../v1/google/cloud/dataproc_v1/__init__.py | 218 ++ .../cloud/dataproc_v1/gapic_metadata.json | 335 ++ .../v1/google/cloud/dataproc_v1/py.typed | 2 + .../cloud/dataproc_v1/services/__init__.py | 15 + .../autoscaling_policy_service/__init__.py | 22 + .../async_client.py | 624 ++++ .../autoscaling_policy_service/client.py | 790 +++++ .../autoscaling_policy_service/pagers.py | 140 + .../transports/__init__.py | 33 + .../transports/base.py | 246 ++ .../transports/grpc.py | 363 +++ .../transports/grpc_asyncio.py | 367 +++ .../services/cluster_controller/__init__.py | 22 + .../cluster_controller/async_client.py | 1020 ++++++ .../services/cluster_controller/client.py | 1178 +++++++ .../services/cluster_controller/pagers.py | 140 + .../cluster_controller/transports/__init__.py | 33 + .../cluster_controller/transports/base.py | 313 ++ .../cluster_controller/transports/grpc.py | 472 +++ .../transports/grpc_asyncio.py | 476 +++ .../services/job_controller/__init__.py | 22 + .../services/job_controller/async_client.py | 796 +++++ .../services/job_controller/client.py | 927 ++++++ .../services/job_controller/pagers.py | 140 + .../job_controller/transports/__init__.py | 33 + .../job_controller/transports/base.py | 308 ++ .../job_controller/transports/grpc.py | 434 +++ .../job_controller/transports/grpc_asyncio.py | 438 +++ .../workflow_template_service/__init__.py | 22 + .../workflow_template_service/async_client.py | 945 ++++++ .../workflow_template_service/client.py | 1103 +++++++ .../workflow_template_service/pagers.py | 140 + .../transports/__init__.py | 33 + .../transports/base.py | 306 ++ .../transports/grpc.py | 481 +++ .../transports/grpc_asyncio.py | 485 +++ .../cloud/dataproc_v1/types/__init__.py | 209 ++ .../dataproc_v1/types/autoscaling_policies.py | 416 +++ .../cloud/dataproc_v1/types/clusters.py | 1797 +++++++++++ .../v1/google/cloud/dataproc_v1/types/jobs.py | 1368 ++++++++ .../cloud/dataproc_v1/types/operations.py | 133 + .../google/cloud/dataproc_v1/types/shared.py | 46 + .../dataproc_v1/types/workflow_templates.py | 1050 ++++++ owl-bot-staging/v1/mypy.ini | 3 + owl-bot-staging/v1/noxfile.py | 132 + .../v1/scripts/fixup_dataproc_v1_keywords.py | 202 ++ owl-bot-staging/v1/setup.py | 53 + owl-bot-staging/v1/tests/__init__.py | 16 + owl-bot-staging/v1/tests/unit/__init__.py | 16 + .../v1/tests/unit/gapic/__init__.py | 16 + .../tests/unit/gapic/dataproc_v1/__init__.py | 16 + .../test_autoscaling_policy_service.py | 2293 +++++++++++++ 
.../dataproc_v1/test_cluster_controller.py | 2449 ++++++++++++++ .../gapic/dataproc_v1/test_job_controller.py | 2355 ++++++++++++++ .../test_workflow_template_service.py | 2863 +++++++++++++++++ owl-bot-staging/v1beta2/.coveragerc | 17 + owl-bot-staging/v1beta2/MANIFEST.in | 2 + owl-bot-staging/v1beta2/README.rst | 49 + owl-bot-staging/v1beta2/docs/conf.py | 376 +++ .../autoscaling_policy_service.rst | 10 + .../dataproc_v1beta2/cluster_controller.rst | 10 + .../docs/dataproc_v1beta2/job_controller.rst | 10 + .../docs/dataproc_v1beta2/services.rst | 9 + .../v1beta2/docs/dataproc_v1beta2/types.rst | 7 + .../workflow_template_service.rst | 10 + owl-bot-staging/v1beta2/docs/index.rst | 7 + .../v1beta2/google/cloud/dataproc/__init__.py | 205 ++ .../v1beta2/google/cloud/dataproc/py.typed | 2 + .../google/cloud/dataproc_v1beta2/__init__.py | 206 ++ .../dataproc_v1beta2/gapic_metadata.json | 315 ++ .../google/cloud/dataproc_v1beta2/py.typed | 2 + .../dataproc_v1beta2/services/__init__.py | 15 + .../autoscaling_policy_service/__init__.py | 22 + .../async_client.py | 623 ++++ .../autoscaling_policy_service/client.py | 789 +++++ .../autoscaling_policy_service/pagers.py | 140 + .../transports/__init__.py | 33 + .../transports/base.py | 246 ++ .../transports/grpc.py | 363 +++ .../transports/grpc_asyncio.py | 367 +++ .../services/cluster_controller/__init__.py | 22 + .../cluster_controller/async_client.py | 923 ++++++ .../services/cluster_controller/client.py | 1070 ++++++ .../services/cluster_controller/pagers.py | 140 + .../cluster_controller/transports/__init__.py | 33 + .../cluster_controller/transports/base.py | 285 ++ .../cluster_controller/transports/grpc.py | 419 +++ .../transports/grpc_asyncio.py | 423 +++ .../services/job_controller/__init__.py | 22 + .../services/job_controller/async_client.py | 796 +++++ .../services/job_controller/client.py | 927 ++++++ .../services/job_controller/pagers.py | 140 + .../job_controller/transports/__init__.py | 33 + .../job_controller/transports/base.py | 308 ++ .../job_controller/transports/grpc.py | 434 +++ .../job_controller/transports/grpc_asyncio.py | 438 +++ .../workflow_template_service/__init__.py | 22 + .../workflow_template_service/async_client.py | 943 ++++++ .../workflow_template_service/client.py | 1092 +++++++ .../workflow_template_service/pagers.py | 140 + .../transports/__init__.py | 33 + .../transports/base.py | 306 ++ .../transports/grpc.py | 481 +++ .../transports/grpc_asyncio.py | 485 +++ .../cloud/dataproc_v1beta2/types/__init__.py | 197 ++ .../types/autoscaling_policies.py | 416 +++ .../cloud/dataproc_v1beta2/types/clusters.py | 1545 +++++++++ .../cloud/dataproc_v1beta2/types/jobs.py | 1364 ++++++++ .../dataproc_v1beta2/types/operations.py | 133 + .../cloud/dataproc_v1beta2/types/shared.py | 43 + .../types/workflow_templates.py | 1073 ++++++ owl-bot-staging/v1beta2/mypy.ini | 3 + owl-bot-staging/v1beta2/noxfile.py | 132 + .../fixup_dataproc_v1beta2_keywords.py | 200 ++ owl-bot-staging/v1beta2/setup.py | 53 + owl-bot-staging/v1beta2/tests/__init__.py | 16 + .../v1beta2/tests/unit/__init__.py | 16 + .../v1beta2/tests/unit/gapic/__init__.py | 16 + .../unit/gapic/dataproc_v1beta2/__init__.py | 16 + .../test_autoscaling_policy_service.py | 2293 +++++++++++++ .../test_cluster_controller.py | 2258 +++++++++++++ .../dataproc_v1beta2/test_job_controller.py | 2371 ++++++++++++++ .../test_workflow_template_service.py | 2842 ++++++++++++++++ 136 files changed, 58318 insertions(+) create mode 100644 owl-bot-staging/v1/.coveragerc create mode 100644 
owl-bot-staging/v1/MANIFEST.in create mode 100644 owl-bot-staging/v1/README.rst create mode 100644 owl-bot-staging/v1/docs/conf.py create mode 100644 owl-bot-staging/v1/docs/dataproc_v1/autoscaling_policy_service.rst create mode 100644 owl-bot-staging/v1/docs/dataproc_v1/cluster_controller.rst create mode 100644 owl-bot-staging/v1/docs/dataproc_v1/job_controller.rst create mode 100644 owl-bot-staging/v1/docs/dataproc_v1/services.rst create mode 100644 owl-bot-staging/v1/docs/dataproc_v1/types.rst create mode 100644 owl-bot-staging/v1/docs/dataproc_v1/workflow_template_service.rst create mode 100644 owl-bot-staging/v1/docs/index.rst create mode 100644 owl-bot-staging/v1/google/cloud/dataproc/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/gapic_metadata.json create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/client.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/client.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/base.py create mode 100644 
owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/autoscaling_policies.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/clusters.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/jobs.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/operations.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/shared.py create mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/workflow_templates.py create mode 100644 owl-bot-staging/v1/mypy.ini create mode 100644 owl-bot-staging/v1/noxfile.py create mode 100644 owl-bot-staging/v1/scripts/fixup_dataproc_v1_keywords.py create mode 100644 owl-bot-staging/v1/setup.py create mode 100644 owl-bot-staging/v1/tests/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_cluster_controller.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_job_controller.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py create mode 100644 owl-bot-staging/v1beta2/.coveragerc create mode 100644 owl-bot-staging/v1beta2/MANIFEST.in create mode 100644 owl-bot-staging/v1beta2/README.rst create mode 100644 owl-bot-staging/v1beta2/docs/conf.py create mode 100644 owl-bot-staging/v1beta2/docs/dataproc_v1beta2/autoscaling_policy_service.rst create mode 100644 owl-bot-staging/v1beta2/docs/dataproc_v1beta2/cluster_controller.rst create mode 100644 owl-bot-staging/v1beta2/docs/dataproc_v1beta2/job_controller.rst create mode 100644 owl-bot-staging/v1beta2/docs/dataproc_v1beta2/services.rst create mode 100644 owl-bot-staging/v1beta2/docs/dataproc_v1beta2/types.rst create mode 100644 owl-bot-staging/v1beta2/docs/dataproc_v1beta2/workflow_template_service.rst create mode 100644 owl-bot-staging/v1beta2/docs/index.rst create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc/py.typed create mode 100644 
owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/py.typed create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/client.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py create mode 100644 
owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/__init__.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/clusters.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/jobs.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/operations.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/shared.py create mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/workflow_templates.py create mode 100644 owl-bot-staging/v1beta2/mypy.ini create mode 100644 owl-bot-staging/v1beta2/noxfile.py create mode 100644 owl-bot-staging/v1beta2/scripts/fixup_dataproc_v1beta2_keywords.py create mode 100644 owl-bot-staging/v1beta2/setup.py create mode 100644 owl-bot-staging/v1beta2/tests/__init__.py create mode 100644 owl-bot-staging/v1beta2/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/__init__.py create mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py create mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py create mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py create mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py diff --git a/owl-bot-staging/v1/.coveragerc b/owl-bot-staging/v1/.coveragerc new file mode 100644 index 00000000..240638d1 --- /dev/null +++ b/owl-bot-staging/v1/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/dataproc/__init__.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. 
+ except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1/MANIFEST.in b/owl-bot-staging/v1/MANIFEST.in new file mode 100644 index 00000000..425f6657 --- /dev/null +++ b/owl-bot-staging/v1/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/cloud/dataproc *.py +recursive-include google/cloud/dataproc_v1 *.py diff --git a/owl-bot-staging/v1/README.rst b/owl-bot-staging/v1/README.rst new file mode 100644 index 00000000..b751dfd9 --- /dev/null +++ b/owl-bot-staging/v1/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Cloud Dataproc API +================================================= + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Cloud Dataproc API. +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + source <your-env>/bin/activate + <your-env>/bin/pip install /path/to/library + + +Windows +^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + <your-env>\Scripts\activate + <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/docs/conf.py b/owl-bot-staging/v1/docs/conf.py new file mode 100644 index 00000000..02417582 --- /dev/null +++ b/owl-bot-staging/v1/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# google-cloud-dataproc documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "1.6.3" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGLEOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = "index" + +# General information about the project. +project = u"google-cloud-dataproc" +copyright = u"2020, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. 
+# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-dataproc-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-dataproc.tex", + u"google-cloud-dataproc Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-cloud-dataproc", + u"Google Cloud Dataproc Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-dataproc", + u"google-cloud-dataproc Documentation", + author, + "google-cloud-dataproc", + "GAPIC library for Google Cloud Dataproc API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. 
+intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/docs/dataproc_v1/autoscaling_policy_service.rst b/owl-bot-staging/v1/docs/dataproc_v1/autoscaling_policy_service.rst new file mode 100644 index 00000000..9b885c57 --- /dev/null +++ b/owl-bot-staging/v1/docs/dataproc_v1/autoscaling_policy_service.rst @@ -0,0 +1,10 @@ +AutoscalingPolicyService +------------------------------------------ + +.. automodule:: google.cloud.dataproc_v1.services.autoscaling_policy_service + :members: + :inherited-members: + +.. automodule:: google.cloud.dataproc_v1.services.autoscaling_policy_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/dataproc_v1/cluster_controller.rst b/owl-bot-staging/v1/docs/dataproc_v1/cluster_controller.rst new file mode 100644 index 00000000..d9b7f2ad --- /dev/null +++ b/owl-bot-staging/v1/docs/dataproc_v1/cluster_controller.rst @@ -0,0 +1,10 @@ +ClusterController +----------------------------------- + +.. automodule:: google.cloud.dataproc_v1.services.cluster_controller + :members: + :inherited-members: + +.. automodule:: google.cloud.dataproc_v1.services.cluster_controller.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/dataproc_v1/job_controller.rst b/owl-bot-staging/v1/docs/dataproc_v1/job_controller.rst new file mode 100644 index 00000000..5f14863b --- /dev/null +++ b/owl-bot-staging/v1/docs/dataproc_v1/job_controller.rst @@ -0,0 +1,10 @@ +JobController +------------------------------- + +.. automodule:: google.cloud.dataproc_v1.services.job_controller + :members: + :inherited-members: + +.. automodule:: google.cloud.dataproc_v1.services.job_controller.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/dataproc_v1/services.rst b/owl-bot-staging/v1/docs/dataproc_v1/services.rst new file mode 100644 index 00000000..9d91e7ce --- /dev/null +++ b/owl-bot-staging/v1/docs/dataproc_v1/services.rst @@ -0,0 +1,9 @@ +Services for Google Cloud Dataproc v1 API +========================================= +.. toctree:: + :maxdepth: 2 + + autoscaling_policy_service + cluster_controller + job_controller + workflow_template_service diff --git a/owl-bot-staging/v1/docs/dataproc_v1/types.rst b/owl-bot-staging/v1/docs/dataproc_v1/types.rst new file mode 100644 index 00000000..bc1a0a30 --- /dev/null +++ b/owl-bot-staging/v1/docs/dataproc_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Dataproc v1 API +====================================== + +.. 
automodule:: google.cloud.dataproc_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/dataproc_v1/workflow_template_service.rst b/owl-bot-staging/v1/docs/dataproc_v1/workflow_template_service.rst new file mode 100644 index 00000000..0f301cee --- /dev/null +++ b/owl-bot-staging/v1/docs/dataproc_v1/workflow_template_service.rst @@ -0,0 +1,10 @@ +WorkflowTemplateService +----------------------------------------- + +.. automodule:: google.cloud.dataproc_v1.services.workflow_template_service + :members: + :inherited-members: + +.. automodule:: google.cloud.dataproc_v1.services.workflow_template_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/index.rst b/owl-bot-staging/v1/docs/index.rst new file mode 100644 index 00000000..3bf4df8b --- /dev/null +++ b/owl-bot-staging/v1/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + dataproc_v1/services + dataproc_v1/types diff --git a/owl-bot-staging/v1/google/cloud/dataproc/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc/__init__.py new file mode 100644 index 00000000..4ef0034e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc/__init__.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.dataproc_v1.services.autoscaling_policy_service.client import AutoscalingPolicyServiceClient +from google.cloud.dataproc_v1.services.autoscaling_policy_service.async_client import AutoscalingPolicyServiceAsyncClient +from google.cloud.dataproc_v1.services.cluster_controller.client import ClusterControllerClient +from google.cloud.dataproc_v1.services.cluster_controller.async_client import ClusterControllerAsyncClient +from google.cloud.dataproc_v1.services.job_controller.client import JobControllerClient +from google.cloud.dataproc_v1.services.job_controller.async_client import JobControllerAsyncClient +from google.cloud.dataproc_v1.services.workflow_template_service.client import WorkflowTemplateServiceClient +from google.cloud.dataproc_v1.services.workflow_template_service.async_client import WorkflowTemplateServiceAsyncClient + +from google.cloud.dataproc_v1.types.autoscaling_policies import AutoscalingPolicy +from google.cloud.dataproc_v1.types.autoscaling_policies import BasicAutoscalingAlgorithm +from google.cloud.dataproc_v1.types.autoscaling_policies import BasicYarnAutoscalingConfig +from google.cloud.dataproc_v1.types.autoscaling_policies import CreateAutoscalingPolicyRequest +from google.cloud.dataproc_v1.types.autoscaling_policies import DeleteAutoscalingPolicyRequest +from google.cloud.dataproc_v1.types.autoscaling_policies import GetAutoscalingPolicyRequest +from google.cloud.dataproc_v1.types.autoscaling_policies import InstanceGroupAutoscalingPolicyConfig +from google.cloud.dataproc_v1.types.autoscaling_policies import ListAutoscalingPoliciesRequest +from google.cloud.dataproc_v1.types.autoscaling_policies import ListAutoscalingPoliciesResponse +from google.cloud.dataproc_v1.types.autoscaling_policies import UpdateAutoscalingPolicyRequest +from google.cloud.dataproc_v1.types.clusters import AcceleratorConfig +from google.cloud.dataproc_v1.types.clusters import AutoscalingConfig +from google.cloud.dataproc_v1.types.clusters import Cluster +from google.cloud.dataproc_v1.types.clusters import ClusterConfig +from google.cloud.dataproc_v1.types.clusters import ClusterMetrics +from google.cloud.dataproc_v1.types.clusters import ClusterStatus +from google.cloud.dataproc_v1.types.clusters import CreateClusterRequest +from google.cloud.dataproc_v1.types.clusters import DeleteClusterRequest +from google.cloud.dataproc_v1.types.clusters import DiagnoseClusterRequest +from google.cloud.dataproc_v1.types.clusters import DiagnoseClusterResults +from google.cloud.dataproc_v1.types.clusters import DiskConfig +from google.cloud.dataproc_v1.types.clusters import EncryptionConfig +from google.cloud.dataproc_v1.types.clusters import EndpointConfig +from google.cloud.dataproc_v1.types.clusters import GceClusterConfig +from google.cloud.dataproc_v1.types.clusters import GetClusterRequest +from google.cloud.dataproc_v1.types.clusters import GkeClusterConfig +from google.cloud.dataproc_v1.types.clusters import IdentityConfig +from google.cloud.dataproc_v1.types.clusters import InstanceGroupConfig +from google.cloud.dataproc_v1.types.clusters import KerberosConfig +from google.cloud.dataproc_v1.types.clusters import LifecycleConfig +from google.cloud.dataproc_v1.types.clusters import ListClustersRequest +from google.cloud.dataproc_v1.types.clusters import ListClustersResponse +from google.cloud.dataproc_v1.types.clusters import ManagedGroupConfig +from google.cloud.dataproc_v1.types.clusters import MetastoreConfig +from google.cloud.dataproc_v1.types.clusters import 
NodeGroupAffinity +from google.cloud.dataproc_v1.types.clusters import NodeInitializationAction +from google.cloud.dataproc_v1.types.clusters import ReservationAffinity +from google.cloud.dataproc_v1.types.clusters import SecurityConfig +from google.cloud.dataproc_v1.types.clusters import ShieldedInstanceConfig +from google.cloud.dataproc_v1.types.clusters import SoftwareConfig +from google.cloud.dataproc_v1.types.clusters import StartClusterRequest +from google.cloud.dataproc_v1.types.clusters import StopClusterRequest +from google.cloud.dataproc_v1.types.clusters import UpdateClusterRequest +from google.cloud.dataproc_v1.types.jobs import CancelJobRequest +from google.cloud.dataproc_v1.types.jobs import DeleteJobRequest +from google.cloud.dataproc_v1.types.jobs import GetJobRequest +from google.cloud.dataproc_v1.types.jobs import HadoopJob +from google.cloud.dataproc_v1.types.jobs import HiveJob +from google.cloud.dataproc_v1.types.jobs import Job +from google.cloud.dataproc_v1.types.jobs import JobMetadata +from google.cloud.dataproc_v1.types.jobs import JobPlacement +from google.cloud.dataproc_v1.types.jobs import JobReference +from google.cloud.dataproc_v1.types.jobs import JobScheduling +from google.cloud.dataproc_v1.types.jobs import JobStatus +from google.cloud.dataproc_v1.types.jobs import ListJobsRequest +from google.cloud.dataproc_v1.types.jobs import ListJobsResponse +from google.cloud.dataproc_v1.types.jobs import LoggingConfig +from google.cloud.dataproc_v1.types.jobs import PigJob +from google.cloud.dataproc_v1.types.jobs import PrestoJob +from google.cloud.dataproc_v1.types.jobs import PySparkJob +from google.cloud.dataproc_v1.types.jobs import QueryList +from google.cloud.dataproc_v1.types.jobs import SparkJob +from google.cloud.dataproc_v1.types.jobs import SparkRJob +from google.cloud.dataproc_v1.types.jobs import SparkSqlJob +from google.cloud.dataproc_v1.types.jobs import SubmitJobRequest +from google.cloud.dataproc_v1.types.jobs import UpdateJobRequest +from google.cloud.dataproc_v1.types.jobs import YarnApplication +from google.cloud.dataproc_v1.types.operations import ClusterOperationMetadata +from google.cloud.dataproc_v1.types.operations import ClusterOperationStatus +from google.cloud.dataproc_v1.types.shared import Component +from google.cloud.dataproc_v1.types.workflow_templates import ClusterOperation +from google.cloud.dataproc_v1.types.workflow_templates import ClusterSelector +from google.cloud.dataproc_v1.types.workflow_templates import CreateWorkflowTemplateRequest +from google.cloud.dataproc_v1.types.workflow_templates import DeleteWorkflowTemplateRequest +from google.cloud.dataproc_v1.types.workflow_templates import GetWorkflowTemplateRequest +from google.cloud.dataproc_v1.types.workflow_templates import InstantiateInlineWorkflowTemplateRequest +from google.cloud.dataproc_v1.types.workflow_templates import InstantiateWorkflowTemplateRequest +from google.cloud.dataproc_v1.types.workflow_templates import ListWorkflowTemplatesRequest +from google.cloud.dataproc_v1.types.workflow_templates import ListWorkflowTemplatesResponse +from google.cloud.dataproc_v1.types.workflow_templates import ManagedCluster +from google.cloud.dataproc_v1.types.workflow_templates import OrderedJob +from google.cloud.dataproc_v1.types.workflow_templates import ParameterValidation +from google.cloud.dataproc_v1.types.workflow_templates import RegexValidation +from google.cloud.dataproc_v1.types.workflow_templates import TemplateParameter +from 
google.cloud.dataproc_v1.types.workflow_templates import UpdateWorkflowTemplateRequest +from google.cloud.dataproc_v1.types.workflow_templates import ValueValidation +from google.cloud.dataproc_v1.types.workflow_templates import WorkflowGraph +from google.cloud.dataproc_v1.types.workflow_templates import WorkflowMetadata +from google.cloud.dataproc_v1.types.workflow_templates import WorkflowNode +from google.cloud.dataproc_v1.types.workflow_templates import WorkflowTemplate +from google.cloud.dataproc_v1.types.workflow_templates import WorkflowTemplatePlacement + +__all__ = ('AutoscalingPolicyServiceClient', + 'AutoscalingPolicyServiceAsyncClient', + 'ClusterControllerClient', + 'ClusterControllerAsyncClient', + 'JobControllerClient', + 'JobControllerAsyncClient', + 'WorkflowTemplateServiceClient', + 'WorkflowTemplateServiceAsyncClient', + 'AutoscalingPolicy', + 'BasicAutoscalingAlgorithm', + 'BasicYarnAutoscalingConfig', + 'CreateAutoscalingPolicyRequest', + 'DeleteAutoscalingPolicyRequest', + 'GetAutoscalingPolicyRequest', + 'InstanceGroupAutoscalingPolicyConfig', + 'ListAutoscalingPoliciesRequest', + 'ListAutoscalingPoliciesResponse', + 'UpdateAutoscalingPolicyRequest', + 'AcceleratorConfig', + 'AutoscalingConfig', + 'Cluster', + 'ClusterConfig', + 'ClusterMetrics', + 'ClusterStatus', + 'CreateClusterRequest', + 'DeleteClusterRequest', + 'DiagnoseClusterRequest', + 'DiagnoseClusterResults', + 'DiskConfig', + 'EncryptionConfig', + 'EndpointConfig', + 'GceClusterConfig', + 'GetClusterRequest', + 'GkeClusterConfig', + 'IdentityConfig', + 'InstanceGroupConfig', + 'KerberosConfig', + 'LifecycleConfig', + 'ListClustersRequest', + 'ListClustersResponse', + 'ManagedGroupConfig', + 'MetastoreConfig', + 'NodeGroupAffinity', + 'NodeInitializationAction', + 'ReservationAffinity', + 'SecurityConfig', + 'ShieldedInstanceConfig', + 'SoftwareConfig', + 'StartClusterRequest', + 'StopClusterRequest', + 'UpdateClusterRequest', + 'CancelJobRequest', + 'DeleteJobRequest', + 'GetJobRequest', + 'HadoopJob', + 'HiveJob', + 'Job', + 'JobMetadata', + 'JobPlacement', + 'JobReference', + 'JobScheduling', + 'JobStatus', + 'ListJobsRequest', + 'ListJobsResponse', + 'LoggingConfig', + 'PigJob', + 'PrestoJob', + 'PySparkJob', + 'QueryList', + 'SparkJob', + 'SparkRJob', + 'SparkSqlJob', + 'SubmitJobRequest', + 'UpdateJobRequest', + 'YarnApplication', + 'ClusterOperationMetadata', + 'ClusterOperationStatus', + 'Component', + 'ClusterOperation', + 'ClusterSelector', + 'CreateWorkflowTemplateRequest', + 'DeleteWorkflowTemplateRequest', + 'GetWorkflowTemplateRequest', + 'InstantiateInlineWorkflowTemplateRequest', + 'InstantiateWorkflowTemplateRequest', + 'ListWorkflowTemplatesRequest', + 'ListWorkflowTemplatesResponse', + 'ManagedCluster', + 'OrderedJob', + 'ParameterValidation', + 'RegexValidation', + 'TemplateParameter', + 'UpdateWorkflowTemplateRequest', + 'ValueValidation', + 'WorkflowGraph', + 'WorkflowMetadata', + 'WorkflowNode', + 'WorkflowTemplate', + 'WorkflowTemplatePlacement', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc/py.typed b/owl-bot-staging/v1/google/cloud/dataproc/py.typed new file mode 100644 index 00000000..aac99cba --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-dataproc package uses inline types. 
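The package-level ``__init__.py`` above re-exports every generated client and message type at ``google.cloud.dataproc``, so callers do not need to import from the versioned ``dataproc_v1`` package directly. A minimal usage sketch of that surface follows; it assumes Application Default Credentials are configured, and the ``project_id``/``region`` keywords mirror the flattened fields of ``ListClustersRequest`` in ``types/clusters.py`` — treat the exact call shape as illustrative rather than as something guaranteed by this change:

.. code-block:: python

    from google.cloud import dataproc

    # ClusterControllerClient is one of the clients re-exported above; it wraps
    # the gRPC transport generated in this PR. Assumes default credentials.
    client = dataproc.ClusterControllerClient()

    # ListClusters maps to list_clusters in gapic_metadata.json; the call returns
    # a pager that yields Cluster messages.
    for cluster in client.list_clusters(project_id="my-project", region="us-central1"):
        print(cluster.cluster_name)
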
diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/__init__.py new file mode 100644 index 00000000..278cb5d9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/__init__.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .services.autoscaling_policy_service import AutoscalingPolicyServiceClient +from .services.autoscaling_policy_service import AutoscalingPolicyServiceAsyncClient +from .services.cluster_controller import ClusterControllerClient +from .services.cluster_controller import ClusterControllerAsyncClient +from .services.job_controller import JobControllerClient +from .services.job_controller import JobControllerAsyncClient +from .services.workflow_template_service import WorkflowTemplateServiceClient +from .services.workflow_template_service import WorkflowTemplateServiceAsyncClient + +from .types.autoscaling_policies import AutoscalingPolicy +from .types.autoscaling_policies import BasicAutoscalingAlgorithm +from .types.autoscaling_policies import BasicYarnAutoscalingConfig +from .types.autoscaling_policies import CreateAutoscalingPolicyRequest +from .types.autoscaling_policies import DeleteAutoscalingPolicyRequest +from .types.autoscaling_policies import GetAutoscalingPolicyRequest +from .types.autoscaling_policies import InstanceGroupAutoscalingPolicyConfig +from .types.autoscaling_policies import ListAutoscalingPoliciesRequest +from .types.autoscaling_policies import ListAutoscalingPoliciesResponse +from .types.autoscaling_policies import UpdateAutoscalingPolicyRequest +from .types.clusters import AcceleratorConfig +from .types.clusters import AutoscalingConfig +from .types.clusters import Cluster +from .types.clusters import ClusterConfig +from .types.clusters import ClusterMetrics +from .types.clusters import ClusterStatus +from .types.clusters import CreateClusterRequest +from .types.clusters import DeleteClusterRequest +from .types.clusters import DiagnoseClusterRequest +from .types.clusters import DiagnoseClusterResults +from .types.clusters import DiskConfig +from .types.clusters import EncryptionConfig +from .types.clusters import EndpointConfig +from .types.clusters import GceClusterConfig +from .types.clusters import GetClusterRequest +from .types.clusters import GkeClusterConfig +from .types.clusters import IdentityConfig +from .types.clusters import InstanceGroupConfig +from .types.clusters import KerberosConfig +from .types.clusters import LifecycleConfig +from .types.clusters import ListClustersRequest +from .types.clusters import ListClustersResponse +from .types.clusters import ManagedGroupConfig +from .types.clusters import MetastoreConfig +from .types.clusters import NodeGroupAffinity +from .types.clusters import NodeInitializationAction +from .types.clusters import ReservationAffinity +from .types.clusters import SecurityConfig +from .types.clusters import ShieldedInstanceConfig +from .types.clusters import 
SoftwareConfig +from .types.clusters import StartClusterRequest +from .types.clusters import StopClusterRequest +from .types.clusters import UpdateClusterRequest +from .types.jobs import CancelJobRequest +from .types.jobs import DeleteJobRequest +from .types.jobs import GetJobRequest +from .types.jobs import HadoopJob +from .types.jobs import HiveJob +from .types.jobs import Job +from .types.jobs import JobMetadata +from .types.jobs import JobPlacement +from .types.jobs import JobReference +from .types.jobs import JobScheduling +from .types.jobs import JobStatus +from .types.jobs import ListJobsRequest +from .types.jobs import ListJobsResponse +from .types.jobs import LoggingConfig +from .types.jobs import PigJob +from .types.jobs import PrestoJob +from .types.jobs import PySparkJob +from .types.jobs import QueryList +from .types.jobs import SparkJob +from .types.jobs import SparkRJob +from .types.jobs import SparkSqlJob +from .types.jobs import SubmitJobRequest +from .types.jobs import UpdateJobRequest +from .types.jobs import YarnApplication +from .types.operations import ClusterOperationMetadata +from .types.operations import ClusterOperationStatus +from .types.shared import Component +from .types.workflow_templates import ClusterOperation +from .types.workflow_templates import ClusterSelector +from .types.workflow_templates import CreateWorkflowTemplateRequest +from .types.workflow_templates import DeleteWorkflowTemplateRequest +from .types.workflow_templates import GetWorkflowTemplateRequest +from .types.workflow_templates import InstantiateInlineWorkflowTemplateRequest +from .types.workflow_templates import InstantiateWorkflowTemplateRequest +from .types.workflow_templates import ListWorkflowTemplatesRequest +from .types.workflow_templates import ListWorkflowTemplatesResponse +from .types.workflow_templates import ManagedCluster +from .types.workflow_templates import OrderedJob +from .types.workflow_templates import ParameterValidation +from .types.workflow_templates import RegexValidation +from .types.workflow_templates import TemplateParameter +from .types.workflow_templates import UpdateWorkflowTemplateRequest +from .types.workflow_templates import ValueValidation +from .types.workflow_templates import WorkflowGraph +from .types.workflow_templates import WorkflowMetadata +from .types.workflow_templates import WorkflowNode +from .types.workflow_templates import WorkflowTemplate +from .types.workflow_templates import WorkflowTemplatePlacement + +__all__ = ( + 'AutoscalingPolicyServiceAsyncClient', + 'ClusterControllerAsyncClient', + 'JobControllerAsyncClient', + 'WorkflowTemplateServiceAsyncClient', +'AcceleratorConfig', +'AutoscalingConfig', +'AutoscalingPolicy', +'AutoscalingPolicyServiceClient', +'BasicAutoscalingAlgorithm', +'BasicYarnAutoscalingConfig', +'CancelJobRequest', +'Cluster', +'ClusterConfig', +'ClusterControllerClient', +'ClusterMetrics', +'ClusterOperation', +'ClusterOperationMetadata', +'ClusterOperationStatus', +'ClusterSelector', +'ClusterStatus', +'Component', +'CreateAutoscalingPolicyRequest', +'CreateClusterRequest', +'CreateWorkflowTemplateRequest', +'DeleteAutoscalingPolicyRequest', +'DeleteClusterRequest', +'DeleteJobRequest', +'DeleteWorkflowTemplateRequest', +'DiagnoseClusterRequest', +'DiagnoseClusterResults', +'DiskConfig', +'EncryptionConfig', +'EndpointConfig', +'GceClusterConfig', +'GetAutoscalingPolicyRequest', +'GetClusterRequest', +'GetJobRequest', +'GetWorkflowTemplateRequest', +'GkeClusterConfig', +'HadoopJob', +'HiveJob', +'IdentityConfig', 
+'InstanceGroupAutoscalingPolicyConfig', +'InstanceGroupConfig', +'InstantiateInlineWorkflowTemplateRequest', +'InstantiateWorkflowTemplateRequest', +'Job', +'JobControllerClient', +'JobMetadata', +'JobPlacement', +'JobReference', +'JobScheduling', +'JobStatus', +'KerberosConfig', +'LifecycleConfig', +'ListAutoscalingPoliciesRequest', +'ListAutoscalingPoliciesResponse', +'ListClustersRequest', +'ListClustersResponse', +'ListJobsRequest', +'ListJobsResponse', +'ListWorkflowTemplatesRequest', +'ListWorkflowTemplatesResponse', +'LoggingConfig', +'ManagedCluster', +'ManagedGroupConfig', +'MetastoreConfig', +'NodeGroupAffinity', +'NodeInitializationAction', +'OrderedJob', +'ParameterValidation', +'PigJob', +'PrestoJob', +'PySparkJob', +'QueryList', +'RegexValidation', +'ReservationAffinity', +'SecurityConfig', +'ShieldedInstanceConfig', +'SoftwareConfig', +'SparkJob', +'SparkRJob', +'SparkSqlJob', +'StartClusterRequest', +'StopClusterRequest', +'SubmitJobRequest', +'TemplateParameter', +'UpdateAutoscalingPolicyRequest', +'UpdateClusterRequest', +'UpdateJobRequest', +'UpdateWorkflowTemplateRequest', +'ValueValidation', +'WorkflowGraph', +'WorkflowMetadata', +'WorkflowNode', +'WorkflowTemplate', +'WorkflowTemplatePlacement', +'WorkflowTemplateServiceClient', +'YarnApplication', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/dataproc_v1/gapic_metadata.json new file mode 100644 index 00000000..2d068a45 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/gapic_metadata.json @@ -0,0 +1,335 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.dataproc_v1", + "protoPackage": "google.cloud.dataproc.v1", + "schema": "1.0", + "services": { + "AutoscalingPolicyService": { + "clients": { + "grpc": { + "libraryClient": "AutoscalingPolicyServiceClient", + "rpcs": { + "CreateAutoscalingPolicy": { + "methods": [ + "create_autoscaling_policy" + ] + }, + "DeleteAutoscalingPolicy": { + "methods": [ + "delete_autoscaling_policy" + ] + }, + "GetAutoscalingPolicy": { + "methods": [ + "get_autoscaling_policy" + ] + }, + "ListAutoscalingPolicies": { + "methods": [ + "list_autoscaling_policies" + ] + }, + "UpdateAutoscalingPolicy": { + "methods": [ + "update_autoscaling_policy" + ] + } + } + }, + "grpc-async": { + "libraryClient": "AutoscalingPolicyServiceAsyncClient", + "rpcs": { + "CreateAutoscalingPolicy": { + "methods": [ + "create_autoscaling_policy" + ] + }, + "DeleteAutoscalingPolicy": { + "methods": [ + "delete_autoscaling_policy" + ] + }, + "GetAutoscalingPolicy": { + "methods": [ + "get_autoscaling_policy" + ] + }, + "ListAutoscalingPolicies": { + "methods": [ + "list_autoscaling_policies" + ] + }, + "UpdateAutoscalingPolicy": { + "methods": [ + "update_autoscaling_policy" + ] + } + } + } + } + }, + "ClusterController": { + "clients": { + "grpc": { + "libraryClient": "ClusterControllerClient", + "rpcs": { + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DiagnoseCluster": { + "methods": [ + "diagnose_cluster" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "StartCluster": { + "methods": [ + "start_cluster" + ] + }, + "StopCluster": { + "methods": [ + "stop_cluster" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + } + } + }, + "grpc-async": { + 
"libraryClient": "ClusterControllerAsyncClient", + "rpcs": { + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DiagnoseCluster": { + "methods": [ + "diagnose_cluster" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "StartCluster": { + "methods": [ + "start_cluster" + ] + }, + "StopCluster": { + "methods": [ + "stop_cluster" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + } + } + } + } + }, + "JobController": { + "clients": { + "grpc": { + "libraryClient": "JobControllerClient", + "rpcs": { + "CancelJob": { + "methods": [ + "cancel_job" + ] + }, + "DeleteJob": { + "methods": [ + "delete_job" + ] + }, + "GetJob": { + "methods": [ + "get_job" + ] + }, + "ListJobs": { + "methods": [ + "list_jobs" + ] + }, + "SubmitJob": { + "methods": [ + "submit_job" + ] + }, + "SubmitJobAsOperation": { + "methods": [ + "submit_job_as_operation" + ] + }, + "UpdateJob": { + "methods": [ + "update_job" + ] + } + } + }, + "grpc-async": { + "libraryClient": "JobControllerAsyncClient", + "rpcs": { + "CancelJob": { + "methods": [ + "cancel_job" + ] + }, + "DeleteJob": { + "methods": [ + "delete_job" + ] + }, + "GetJob": { + "methods": [ + "get_job" + ] + }, + "ListJobs": { + "methods": [ + "list_jobs" + ] + }, + "SubmitJob": { + "methods": [ + "submit_job" + ] + }, + "SubmitJobAsOperation": { + "methods": [ + "submit_job_as_operation" + ] + }, + "UpdateJob": { + "methods": [ + "update_job" + ] + } + } + } + } + }, + "WorkflowTemplateService": { + "clients": { + "grpc": { + "libraryClient": "WorkflowTemplateServiceClient", + "rpcs": { + "CreateWorkflowTemplate": { + "methods": [ + "create_workflow_template" + ] + }, + "DeleteWorkflowTemplate": { + "methods": [ + "delete_workflow_template" + ] + }, + "GetWorkflowTemplate": { + "methods": [ + "get_workflow_template" + ] + }, + "InstantiateInlineWorkflowTemplate": { + "methods": [ + "instantiate_inline_workflow_template" + ] + }, + "InstantiateWorkflowTemplate": { + "methods": [ + "instantiate_workflow_template" + ] + }, + "ListWorkflowTemplates": { + "methods": [ + "list_workflow_templates" + ] + }, + "UpdateWorkflowTemplate": { + "methods": [ + "update_workflow_template" + ] + } + } + }, + "grpc-async": { + "libraryClient": "WorkflowTemplateServiceAsyncClient", + "rpcs": { + "CreateWorkflowTemplate": { + "methods": [ + "create_workflow_template" + ] + }, + "DeleteWorkflowTemplate": { + "methods": [ + "delete_workflow_template" + ] + }, + "GetWorkflowTemplate": { + "methods": [ + "get_workflow_template" + ] + }, + "InstantiateInlineWorkflowTemplate": { + "methods": [ + "instantiate_inline_workflow_template" + ] + }, + "InstantiateWorkflowTemplate": { + "methods": [ + "instantiate_workflow_template" + ] + }, + "ListWorkflowTemplates": { + "methods": [ + "list_workflow_templates" + ] + }, + "UpdateWorkflowTemplate": { + "methods": [ + "update_workflow_template" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/py.typed b/owl-bot-staging/v1/google/cloud/dataproc_v1/py.typed new file mode 100644 index 00000000..aac99cba --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-dataproc package uses inline types. 
diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/__init__.py new file mode 100644 index 00000000..4de65971 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/__init__.py new file mode 100644 index 00000000..2401da6f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import AutoscalingPolicyServiceClient +from .async_client import AutoscalingPolicyServiceAsyncClient + +__all__ = ( + 'AutoscalingPolicyServiceClient', + 'AutoscalingPolicyServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py new file mode 100644 index 00000000..19463b1c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py @@ -0,0 +1,624 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1.services.autoscaling_policy_service import pagers +from google.cloud.dataproc_v1.types import autoscaling_policies +from .transports.base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport +from .client import AutoscalingPolicyServiceClient + + +class AutoscalingPolicyServiceAsyncClient: + """The API interface for managing autoscaling policies in the + Dataproc API. + """ + + _client: AutoscalingPolicyServiceClient + + DEFAULT_ENDPOINT = AutoscalingPolicyServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = AutoscalingPolicyServiceClient.DEFAULT_MTLS_ENDPOINT + + autoscaling_policy_path = staticmethod(AutoscalingPolicyServiceClient.autoscaling_policy_path) + parse_autoscaling_policy_path = staticmethod(AutoscalingPolicyServiceClient.parse_autoscaling_policy_path) + common_billing_account_path = staticmethod(AutoscalingPolicyServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(AutoscalingPolicyServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(AutoscalingPolicyServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_organization_path) + common_project_path = staticmethod(AutoscalingPolicyServiceClient.common_project_path) + parse_common_project_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_project_path) + common_location_path = staticmethod(AutoscalingPolicyServiceClient.common_location_path) + parse_common_location_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceAsyncClient: The constructed client. + """ + return AutoscalingPolicyServiceClient.from_service_account_info.__func__(AutoscalingPolicyServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceAsyncClient: The constructed client. 
+ """ + return AutoscalingPolicyServiceClient.from_service_account_file.__func__(AutoscalingPolicyServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> AutoscalingPolicyServiceTransport: + """Returns the transport used by the client instance. + + Returns: + AutoscalingPolicyServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(AutoscalingPolicyServiceClient).get_transport_class, type(AutoscalingPolicyServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, AutoscalingPolicyServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the autoscaling policy service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AutoscalingPolicyServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = AutoscalingPolicyServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_autoscaling_policy(self, + request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, + *, + parent: str = None, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Creates new autoscaling policy. + + Args: + request (:class:`google.cloud.dataproc_v1.types.CreateAutoscalingPolicyRequest`): + The request object. A request to create an autoscaling + policy. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.autoscalingPolicies.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + policy (:class:`google.cloud.dataproc_v1.types.AutoscalingPolicy`): + Required. The autoscaling policy to + create. + + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, policy]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_autoscaling_policy, + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_autoscaling_policy(self, + request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, + *, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Args: + request (:class:`google.cloud.dataproc_v1.types.UpdateAutoscalingPolicyRequest`): + The request object. A request to update an autoscaling + policy. + policy (:class:`google.cloud.dataproc_v1.types.AutoscalingPolicy`): + Required. The updated autoscaling + policy. + + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.dataproc_v1.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([policy]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_autoscaling_policy, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("policy.name", request.policy.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_autoscaling_policy(self, + request: autoscaling_policies.GetAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Retrieves autoscaling policy. + + Args: + request (:class:`google.cloud.dataproc_v1.types.GetAutoscalingPolicyRequest`): + The request object. A request to fetch an autoscaling + policy. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = autoscaling_policies.GetAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_autoscaling_policy, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_autoscaling_policies(self, + request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAutoscalingPoliciesAsyncPager: + r"""Lists autoscaling policies in the project. + + Args: + request (:class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest`): + The request object. A request to list autoscaling + policies in a project. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.list``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesAsyncPager: + A response to a request to list + autoscaling policies in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_autoscaling_policies, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAutoscalingPoliciesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_autoscaling_policy(self, + request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Args: + request (:class:`google.cloud.dataproc_v1.types.DeleteAutoscalingPolicyRequest`): + The request object. A request to delete an autoscaling + policy. + Autoscaling policies in use by one or more clusters will + not be deleted. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For + ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_autoscaling_policy, + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "AutoscalingPolicyServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py new file mode 100644 index 00000000..014ca1b3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py @@ -0,0 +1,790 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1.services.autoscaling_policy_service import pagers +from google.cloud.dataproc_v1.types import autoscaling_policies +from .transports.base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import AutoscalingPolicyServiceGrpcTransport +from .transports.grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport + + +class AutoscalingPolicyServiceClientMeta(type): + """Metaclass for the AutoscalingPolicyService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] + _transport_registry["grpc"] = AutoscalingPolicyServiceGrpcTransport + _transport_registry["grpc_asyncio"] = AutoscalingPolicyServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[AutoscalingPolicyServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class AutoscalingPolicyServiceClient(metaclass=AutoscalingPolicyServiceClientMeta): + """The API interface for managing autoscaling policies in the + Dataproc API. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> AutoscalingPolicyServiceTransport: + """Returns the transport used by the client instance. + + Returns: + AutoscalingPolicyServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def autoscaling_policy_path(project: str,location: str,autoscaling_policy: str,) -> str: + """Returns a fully-qualified autoscaling_policy string.""" + return "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format(project=project, location=location, autoscaling_policy=autoscaling_policy, ) + + @staticmethod + def parse_autoscaling_policy_path(path: str) -> Dict[str,str]: + """Parses a autoscaling_policy path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/autoscalingPolicies/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, AutoscalingPolicyServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the autoscaling policy service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, AutoscalingPolicyServiceTransport]): The + transport to use. 
If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, AutoscalingPolicyServiceTransport): + # transport is a AutoscalingPolicyServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_autoscaling_policy(self, + request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, + *, + parent: str = None, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Creates new autoscaling policy. + + Args: + request (google.cloud.dataproc_v1.types.CreateAutoscalingPolicyRequest): + The request object. A request to create an autoscaling + policy. + parent (str): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.autoscalingPolicies.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): + Required. The autoscaling policy to + create. + + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.CreateAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.CreateAutoscalingPolicyRequest): + request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_autoscaling_policy] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_autoscaling_policy(self, + request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, + *, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Args: + request (google.cloud.dataproc_v1.types.UpdateAutoscalingPolicyRequest): + The request object. A request to update an autoscaling + policy. + policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): + Required. The updated autoscaling + policy. + + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.UpdateAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.UpdateAutoscalingPolicyRequest): + request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_autoscaling_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("policy.name", request.policy.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_autoscaling_policy(self, + request: autoscaling_policies.GetAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Retrieves autoscaling policy. + + Args: + request (google.cloud.dataproc_v1.types.GetAutoscalingPolicyRequest): + The request object. A request to fetch an autoscaling + policy. + name (str): + Required. 
The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.GetAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.GetAutoscalingPolicyRequest): + request = autoscaling_policies.GetAutoscalingPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_autoscaling_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_autoscaling_policies(self, + request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAutoscalingPoliciesPager: + r"""Lists autoscaling policies in the project. + + Args: + request (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest): + The request object. A request to list autoscaling + policies in a project. + parent (str): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.list``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesPager: + A response to a request to list + autoscaling policies in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.ListAutoscalingPoliciesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.ListAutoscalingPoliciesRequest): + request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_autoscaling_policies] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAutoscalingPoliciesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_autoscaling_policy(self, + request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Args: + request (google.cloud.dataproc_v1.types.DeleteAutoscalingPolicyRequest): + The request object. A request to delete an autoscaling + policy. + Autoscaling policies in use by one or more clusters will + not be deleted. + name (str): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For + ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.DeleteAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.DeleteAutoscalingPolicyRequest): + request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_autoscaling_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "AutoscalingPolicyServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py new file mode 100644 index 00000000..938cb5e8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
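Editor's note: the generated client methods above accept either a ``request`` object or the flattened fields (``name``/``parent``), but not both; mixing the two raises ``ValueError``. A minimal usage sketch for the three calls shown, assuming Application Default Credentials and hypothetical project, region, and policy IDs::

    from google.cloud import dataproc_v1

    client = dataproc_v1.AutoscalingPolicyServiceClient()
    parent = "projects/my-project/regions/us-central1"  # hypothetical

    # Flattened field: pass ``parent`` directly; the returned pager fetches
    # additional pages lazily as iteration proceeds.
    for policy in client.list_autoscaling_policies(parent=parent):
        print(policy.id)

    # Equivalent request-object form of the same call.
    request = dataproc_v1.ListAutoscalingPoliciesRequest(parent=parent)
    client.list_autoscaling_policies(request=request)

    name = f"{parent}/autoscalingPolicies/my-policy"  # hypothetical
    policy = client.get_autoscaling_policy(name=name)
    client.delete_autoscaling_policy(name=name)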
+# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.dataproc_v1.types import autoscaling_policies + + +class ListAutoscalingPoliciesPager: + """A pager for iterating through ``list_autoscaling_policies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``policies`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAutoscalingPolicies`` requests and continue to iterate + through the ``policies`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., autoscaling_policies.ListAutoscalingPoliciesResponse], + request: autoscaling_policies.ListAutoscalingPoliciesRequest, + response: autoscaling_policies.ListAutoscalingPoliciesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest): + The initial request object. + response (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[autoscaling_policies.AutoscalingPolicy]: + for page in self.pages: + yield from page.policies + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListAutoscalingPoliciesAsyncPager: + """A pager for iterating through ``list_autoscaling_policies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``policies`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAutoscalingPolicies`` requests and continue to iterate + through the ``policies`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
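Editor's note: the asyncio counterpart of this pager, ``ListAutoscalingPoliciesAsyncPager``, is consumed with ``async for``. A sketch under the same assumptions (hypothetical parent, default credentials)::

    import asyncio
    from google.cloud import dataproc_v1

    async def list_policies():
        client = dataproc_v1.AutoscalingPolicyServiceAsyncClient()
        # The awaited call returns the async pager; iterating it may issue
        # further ListAutoscalingPolicies requests as pages are exhausted.
        pager = await client.list_autoscaling_policies(
            parent="projects/my-project/regions/us-central1"  # hypothetical
        )
        async for policy in pager:
            print(policy.id)

    asyncio.run(list_policies())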
+ """ + def __init__(self, + method: Callable[..., Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse]], + request: autoscaling_policies.ListAutoscalingPoliciesRequest, + response: autoscaling_policies.ListAutoscalingPoliciesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest): + The initial request object. + response (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[autoscaling_policies.AutoscalingPolicy]: + async def async_generator(): + async for page in self.pages: + for response in page.policies: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py new file mode 100644 index 00000000..55ea5b98 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import AutoscalingPolicyServiceTransport +from .grpc import AutoscalingPolicyServiceGrpcTransport +from .grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
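Editor's note: the registry assembled just below maps short transport names to transport classes, and the client's ``get_transport_class`` helper resolves those keys. A hedged sketch of selecting a transport explicitly (only the string keys are fixed by this module)::

    from google.cloud.dataproc_v1.services.autoscaling_policy_service import (
        AutoscalingPolicyServiceClient,
    )

    # "grpc" is the default for the synchronous client; "grpc_asyncio" backs
    # the async client. Passing the key lets the registry choose the class.
    client = AutoscalingPolicyServiceClient(transport="grpc")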
+_transport_registry = OrderedDict() # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] +_transport_registry['grpc'] = AutoscalingPolicyServiceGrpcTransport +_transport_registry['grpc_asyncio'] = AutoscalingPolicyServiceGrpcAsyncIOTransport + +__all__ = ( + 'AutoscalingPolicyServiceTransport', + 'AutoscalingPolicyServiceGrpcTransport', + 'AutoscalingPolicyServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py new file mode 100644 index 00000000..0f56bce9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py @@ -0,0 +1,246 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1.types import autoscaling_policies +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-dataproc', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class AutoscalingPolicyServiceTransport(abc.ABC): + """Abstract transport class for AutoscalingPolicyService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'dataproc.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
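Editor's note: several of the methods wrapped just below carry a default exponential retry on ``DeadlineExceeded``/``ServiceUnavailable`` (0.1 s initial, 60 s cap, 1.3 multiplier) and a 600 s timeout. These defaults can be overridden per call; a minimal sketch with hypothetical values::

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries
    from google.cloud import dataproc_v1

    client = dataproc_v1.AutoscalingPolicyServiceClient()

    # Tighter policy: retry only ServiceUnavailable and give up after 120 s.
    custom_retry = retries.Retry(
        initial=0.2,
        maximum=30.0,
        multiplier=2.0,
        deadline=120.0,
        predicate=retries.if_exception_type(core_exceptions.ServiceUnavailable),
    )
    client.get_autoscaling_policy(
        name="projects/my-project/regions/us-central1/autoscalingPolicies/my-policy",  # hypothetical
        retry=custom_retry,
        timeout=30.0,
    )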
+ self._wrapped_methods = { + self.create_autoscaling_policy: gapic_v1.method.wrap_method( + self.create_autoscaling_policy, + default_timeout=600.0, + client_info=client_info, + ), + self.update_autoscaling_policy: gapic_v1.method.wrap_method( + self.update_autoscaling_policy, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.get_autoscaling_policy: gapic_v1.method.wrap_method( + self.get_autoscaling_policy, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.list_autoscaling_policies: gapic_v1.method.wrap_method( + self.list_autoscaling_policies, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.delete_autoscaling_policy: gapic_v1.method.wrap_method( + self.delete_autoscaling_policy, + default_timeout=600.0, + client_info=client_info, + ), + } + + @property + def create_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + Union[ + autoscaling_policies.AutoscalingPolicy, + Awaitable[autoscaling_policies.AutoscalingPolicy] + ]]: + raise NotImplementedError() + + @property + def update_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + Union[ + autoscaling_policies.AutoscalingPolicy, + Awaitable[autoscaling_policies.AutoscalingPolicy] + ]]: + raise NotImplementedError() + + @property + def get_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + Union[ + autoscaling_policies.AutoscalingPolicy, + Awaitable[autoscaling_policies.AutoscalingPolicy] + ]]: + raise NotImplementedError() + + @property + def list_autoscaling_policies(self) -> Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + Union[ + autoscaling_policies.ListAutoscalingPoliciesResponse, + Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse] + ]]: + raise NotImplementedError() + + @property + def delete_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.DeleteAutoscalingPolicyRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'AutoscalingPolicyServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py new file mode 100644 index 00000000..8803e2ab --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py @@ -0,0 +1,363 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.dataproc_v1.types import autoscaling_policies +from google.protobuf import empty_pb2 # type: ignore +from .base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO + + +class AutoscalingPolicyServiceGrpcTransport(AutoscalingPolicyServiceTransport): + """gRPC backend transport for AutoscalingPolicyService. + + The API interface for managing autoscaling policies in the + Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def create_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy]: + r"""Return a callable for the create autoscaling policy method over gRPC. + + Creates new autoscaling policy. + + Returns: + Callable[[~.CreateAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_autoscaling_policy' not in self._stubs: + self._stubs['create_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.AutoscalingPolicyService/CreateAutoscalingPolicy', + request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['create_autoscaling_policy'] + + @property + def update_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy]: + r"""Return a callable for the update autoscaling policy method over gRPC. + + Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Returns: + Callable[[~.UpdateAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_autoscaling_policy' not in self._stubs: + self._stubs['update_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.AutoscalingPolicyService/UpdateAutoscalingPolicy', + request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['update_autoscaling_policy'] + + @property + def get_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy]: + r"""Return a callable for the get autoscaling policy method over gRPC. + + Retrieves autoscaling policy. + + Returns: + Callable[[~.GetAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_autoscaling_policy' not in self._stubs: + self._stubs['get_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.AutoscalingPolicyService/GetAutoscalingPolicy', + request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['get_autoscaling_policy'] + + @property + def list_autoscaling_policies(self) -> Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + autoscaling_policies.ListAutoscalingPoliciesResponse]: + r"""Return a callable for the list autoscaling policies method over gRPC. + + Lists autoscaling policies in the project. + + Returns: + Callable[[~.ListAutoscalingPoliciesRequest], + ~.ListAutoscalingPoliciesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_autoscaling_policies' not in self._stubs: + self._stubs['list_autoscaling_policies'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.AutoscalingPolicyService/ListAutoscalingPolicies', + request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, + response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, + ) + return self._stubs['list_autoscaling_policies'] + + @property + def delete_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.DeleteAutoscalingPolicyRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete autoscaling policy method over gRPC. + + Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Returns: + Callable[[~.DeleteAutoscalingPolicyRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_autoscaling_policy' not in self._stubs: + self._stubs['delete_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.AutoscalingPolicyService/DeleteAutoscalingPolicy', + request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_autoscaling_policy'] + + +__all__ = ( + 'AutoscalingPolicyServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..65ce3c4c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py @@ -0,0 +1,367 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1.types import autoscaling_policies +from google.protobuf import empty_pb2 # type: ignore +from .base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import AutoscalingPolicyServiceGrpcTransport + + +class AutoscalingPolicyServiceGrpcAsyncIOTransport(AutoscalingPolicyServiceTransport): + """gRPC AsyncIO backend transport for AutoscalingPolicyService. + + The API interface for managing autoscaling policies in the + Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def create_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy]]: + r"""Return a callable for the create autoscaling policy method over gRPC. + + Creates new autoscaling policy. + + Returns: + Callable[[~.CreateAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_autoscaling_policy' not in self._stubs: + self._stubs['create_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.AutoscalingPolicyService/CreateAutoscalingPolicy', + request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['create_autoscaling_policy'] + + @property + def update_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy]]: + r"""Return a callable for the update autoscaling policy method over gRPC. + + Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Returns: + Callable[[~.UpdateAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_autoscaling_policy' not in self._stubs: + self._stubs['update_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.AutoscalingPolicyService/UpdateAutoscalingPolicy', + request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['update_autoscaling_policy'] + + @property + def get_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy]]: + r"""Return a callable for the get autoscaling policy method over gRPC. + + Retrieves autoscaling policy. + + Returns: + Callable[[~.GetAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_autoscaling_policy' not in self._stubs: + self._stubs['get_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.AutoscalingPolicyService/GetAutoscalingPolicy', + request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['get_autoscaling_policy'] + + @property + def list_autoscaling_policies(self) -> Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse]]: + r"""Return a callable for the list autoscaling policies method over gRPC. + + Lists autoscaling policies in the project. + + Returns: + Callable[[~.ListAutoscalingPoliciesRequest], + Awaitable[~.ListAutoscalingPoliciesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_autoscaling_policies' not in self._stubs: + self._stubs['list_autoscaling_policies'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.AutoscalingPolicyService/ListAutoscalingPolicies', + request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, + response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, + ) + return self._stubs['list_autoscaling_policies'] + + @property + def delete_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.DeleteAutoscalingPolicyRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete autoscaling policy method over gRPC. + + Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Returns: + Callable[[~.DeleteAutoscalingPolicyRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_autoscaling_policy' not in self._stubs: + self._stubs['delete_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.AutoscalingPolicyService/DeleteAutoscalingPolicy', + request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_autoscaling_policy'] + + +__all__ = ( + 'AutoscalingPolicyServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/__init__.py new file mode 100644 index 00000000..4b4a11d5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ClusterControllerClient +from .async_client import ClusterControllerAsyncClient + +__all__ = ( + 'ClusterControllerClient', + 'ClusterControllerAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/async_client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/async_client.py new file mode 100644 index 00000000..99ffb764 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/async_client.py @@ -0,0 +1,1020 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1.services.cluster_controller import pagers +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import operations +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import ClusterControllerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport +from .client import ClusterControllerClient + + +class ClusterControllerAsyncClient: + """The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + """ + + _client: ClusterControllerClient + + DEFAULT_ENDPOINT = ClusterControllerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ClusterControllerClient.DEFAULT_MTLS_ENDPOINT + + cluster_path = staticmethod(ClusterControllerClient.cluster_path) + parse_cluster_path = staticmethod(ClusterControllerClient.parse_cluster_path) + service_path = staticmethod(ClusterControllerClient.service_path) + parse_service_path = staticmethod(ClusterControllerClient.parse_service_path) + common_billing_account_path = staticmethod(ClusterControllerClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(ClusterControllerClient.parse_common_billing_account_path) + common_folder_path = staticmethod(ClusterControllerClient.common_folder_path) + parse_common_folder_path = staticmethod(ClusterControllerClient.parse_common_folder_path) + common_organization_path = staticmethod(ClusterControllerClient.common_organization_path) + parse_common_organization_path = staticmethod(ClusterControllerClient.parse_common_organization_path) + common_project_path = staticmethod(ClusterControllerClient.common_project_path) + parse_common_project_path = staticmethod(ClusterControllerClient.parse_common_project_path) + common_location_path = staticmethod(ClusterControllerClient.common_location_path) + parse_common_location_path = staticmethod(ClusterControllerClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerAsyncClient: The constructed client. 
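Editor's note: as with the other generated clients, this factory takes a key-file path, while ``from_service_account_info`` above takes the parsed ``dict``. A brief sketch with a placeholder key path::

    from google.cloud import dataproc_v1

    # "key.json" stands in for a downloaded service-account key file.
    client = dataproc_v1.ClusterControllerAsyncClient.from_service_account_file(
        "key.json"
    )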
+ """ + return ClusterControllerClient.from_service_account_info.__func__(ClusterControllerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerAsyncClient: The constructed client. + """ + return ClusterControllerClient.from_service_account_file.__func__(ClusterControllerAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ClusterControllerTransport: + """Returns the transport used by the client instance. + + Returns: + ClusterControllerTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(ClusterControllerClient).get_transport_class, type(ClusterControllerClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, ClusterControllerTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the cluster controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ClusterControllerClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_cluster(self, + request: clusters.CreateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster: clusters.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a cluster in a project. 
The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`google.cloud.dataproc_v1.types.CreateClusterRequest`): + The request object. A request to create a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`google.cloud.dataproc_v1.types.Cluster`): + Required. The cluster to create. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def update_cluster(self, + request: clusters.UpdateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + cluster: clusters.Cluster = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a cluster in a project. 
The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`google.cloud.dataproc_v1.types.UpdateClusterRequest`): + The request object. A request to update a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project the cluster belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`google.cloud.dataproc_v1.types.Cluster`): + Required. The changes to the cluster. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Specifies the path, relative to ``Cluster``, + of the field to update. For example, to change the + number of workers in a cluster to 5, the ``update_mask`` + parameter would be specified as + ``config.worker_config.num_instances``, and the + ``PATCH`` request body would specify the new value, as + follows: + + :: + + { + "config":{ + "workerConfig":{ + "numInstances":"5" + } + } + } + + Similarly, to change the number of preemptible workers + in a cluster to 5, the ``update_mask`` parameter would + be ``config.secondary_worker_config.num_instances``, and + the ``PATCH`` request body would be set as follows: + + :: + + { + "config":{ + "secondaryWorkerConfig":{ + "numInstances":"5" + } + } + } + + Note: Currently, only the following fields can be + updated: + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + +
+                     <table>
+                     <tbody>
+                     <tr>
+                     <td>Mask</td>
+                     <td>Purpose</td>
+                     </tr>
+                     <tr>
+                     <td>labels</td>
+                     <td>Update labels</td>
+                     </tr>
+                     <tr>
+                     <td>config.worker_config.num_instances</td>
+                     <td>Resize primary worker group</td>
+                     </tr>
+                     <tr>
+                     <td>config.secondary_worker_config.num_instances</td>
+                     <td>Resize secondary worker group</td>
+                     </tr>
+                     <tr>
+                     <td>config.autoscaling_config.policy_uri</td>
+                     <td>Use, stop using, or change autoscaling policies</td>
+                     </tr>
+                     </tbody>
+                     </table>
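+
+                As a rough sketch of building these arguments (the imports
+                and the value ``5`` are illustrative; only the mask path is
+                taken from the example above)::
+
+                    from google.cloud import dataproc_v1
+                    from google.protobuf import field_mask_pb2
+
+                    # Name the field to change and supply its new value.
+                    update_mask = field_mask_pb2.FieldMask(
+                        paths=["config.worker_config.num_instances"],
+                    )
+                    cluster = dataproc_v1.Cluster(
+                        config={"worker_config": {"num_instances": 5}},
+                    )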
+ + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name, cluster, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def stop_cluster(self, + request: clusters.StopClusterRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Stops a cluster in a project. + + Args: + request (:class:`google.cloud.dataproc_v1.types.StopClusterRequest`): + The request object. A request to stop a cluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. 
+ request = clusters.StopClusterRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.stop_cluster, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def start_cluster(self, + request: clusters.StartClusterRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Starts a cluster in a project. + + Args: + request (:class:`google.cloud.dataproc_v1.types.StartClusterRequest`): + The request object. A request to start a cluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + request = clusters.StartClusterRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.start_cluster, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_cluster(self, + request: clusters.DeleteClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`google.cloud.dataproc_v1.types.DeleteClusterRequest`): + The request object. A request to delete a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. 
The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_cluster(self, + request: clusters.GetClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> clusters.Cluster: + r"""Gets the resource representation for a cluster in a + project. + + Args: + request (:class:`google.cloud.dataproc_v1.types.GetClusterRequest`): + The request object. Request to get the resource + representation for a cluster in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.Cluster: + Describes the identifying + information, config, and status of a + cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_clusters(self, + request: clusters.ListClustersRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListClustersAsyncPager: + r"""Lists all regions/{region}/clusters in a project + alphabetically. + + Args: + request (:class:`google.cloud.dataproc_v1.types.ListClustersRequest`): + The request object. A request to list the clusters in a + project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the clusters to list. + Filters are case-sensitive and have the following + syntax: + + field = value [AND [field = value]] ... + + where **field** is one of ``status.state``, + ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a + label key. **value** can be ``*`` to match all values. + ``status.state`` can be one of the following: + ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, + ``ERROR``, ``DELETING``, or ``UPDATING``. 
``ACTIVE`` + contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` + states. ``INACTIVE`` contains the ``DELETING`` and + ``ERROR`` states. ``clusterName`` is the name of the + cluster provided at creation time. Only the logical + ``AND`` operator is supported; space-separated items are + treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.services.cluster_controller.pagers.ListClustersAsyncPager: + The list of all clusters in a + project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_clusters, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListClustersAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def diagnose_cluster(self, + request: clusters.DiagnoseClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains + `DiagnoseClusterResults `__. + + Args: + request (:class:`google.cloud.dataproc_v1.types.DiagnoseClusterRequest`): + The request object. A request to collect cluster + diagnostic information. + project_id (:class:`str`): + Required. 
The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.dataproc_v1.types.DiagnoseClusterResults` + The location of diagnostic output. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.DiagnoseClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.diagnose_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.DiagnoseClusterResults, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. 
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ClusterControllerAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/client.py new file mode 100644 index 00000000..abf7a008 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/client.py @@ -0,0 +1,1178 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1.services.cluster_controller import pagers +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import operations +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import ClusterControllerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import ClusterControllerGrpcTransport +from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport + + +class ClusterControllerClientMeta(type): + """Metaclass for the ClusterController client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ClusterControllerTransport]] + _transport_registry["grpc"] = ClusterControllerGrpcTransport + _transport_registry["grpc_asyncio"] = ClusterControllerGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ClusterControllerTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. 
+ """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ClusterControllerClient(metaclass=ClusterControllerClientMeta): + """The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ClusterControllerTransport: + """Returns the transport used by the client instance. + + Returns: + ClusterControllerTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def cluster_path(project: str,location: str,cluster: str,) -> str: + """Returns a fully-qualified cluster string.""" + return "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) + + @staticmethod + def parse_cluster_path(path: str) -> Dict[str,str]: + """Parses a cluster path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/clusters/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def service_path(project: str,location: str,service: str,) -> str: + """Returns a fully-qualified service string.""" + return "projects/{project}/locations/{location}/services/{service}".format(project=project, location=location, service=service, ) + + @staticmethod + def parse_service_path(path: str) -> Dict[str,str]: + """Parses a service path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/services/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ClusterControllerTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the cluster 
controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ClusterControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ClusterControllerTransport): + # transport is a ClusterControllerTransport instance. 
+ if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_cluster(self, + request: clusters.CreateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster: clusters.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (google.cloud.dataproc_v1.types.CreateClusterRequest): + The request object. A request to create a cluster. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (google.cloud.dataproc_v1.types.Cluster): + Required. The cluster to create. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.CreateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.CreateClusterRequest): + request = clusters.CreateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_cluster] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def update_cluster(self, + request: clusters.UpdateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + cluster: clusters.Cluster = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (google.cloud.dataproc_v1.types.UpdateClusterRequest): + The request object. A request to update a cluster. + project_id (str): + Required. The ID of the Google Cloud + Platform project the cluster belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (str): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (google.cloud.dataproc_v1.types.Cluster): + Required. The changes to the cluster. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Specifies the path, relative to ``Cluster``, + of the field to update. For example, to change the + number of workers in a cluster to 5, the ``update_mask`` + parameter would be specified as + ``config.worker_config.num_instances``, and the + ``PATCH`` request body would specify the new value, as + follows: + + :: + + { + "config":{ + "workerConfig":{ + "numInstances":"5" + } + } + } + + Similarly, to change the number of preemptible workers + in a cluster to 5, the ``update_mask`` parameter would + be ``config.secondary_worker_config.num_instances``, and + the ``PATCH`` request body would be set as follows: + + :: + + { + "config":{ + "secondaryWorkerConfig":{ + "numInstances":"5" + } + } + } + + Note: Currently, only the following fields can be + updated: + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + +
+                     <table>
+                     <tbody>
+                     <tr>
+                     <td>Mask</td>
+                     <td>Purpose</td>
+                     </tr>
+                     <tr>
+                     <td>labels</td>
+                     <td>Update labels</td>
+                     </tr>
+                     <tr>
+                     <td>config.worker_config.num_instances</td>
+                     <td>Resize primary worker group</td>
+                     </tr>
+                     <tr>
+                     <td>config.secondary_worker_config.num_instances</td>
+                     <td>Resize secondary worker group</td>
+                     </tr>
+                     <tr>
+                     <td>config.autoscaling_config.policy_uri</td>
+                     <td>Use, stop using, or change autoscaling policies</td>
+                     </tr>
+                     </tbody>
+                     </table>
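+
+                A minimal call sketch (project, region, and cluster names are
+                placeholders; ``cluster`` carries the new value and
+                ``update_mask`` names the field being changed, as above)::
+
+                    operation = client.update_cluster(
+                        project_id="my-project",
+                        region="us-central1",
+                        cluster_name="my-cluster",
+                        cluster=cluster,
+                        update_mask=update_mask,
+                    )
+                    # Block until the long-running update completes.
+                    updated_cluster = operation.result()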
+ + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name, cluster, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.UpdateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.UpdateClusterRequest): + request = clusters.UpdateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_cluster] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def stop_cluster(self, + request: clusters.StopClusterRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Stops a cluster in a project. + + Args: + request (google.cloud.dataproc_v1.types.StopClusterRequest): + The request object. A request to stop a cluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a clusters.StopClusterRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.StopClusterRequest): + request = clusters.StopClusterRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.stop_cluster] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def start_cluster(self, + request: clusters.StartClusterRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Starts a cluster in a project. + + Args: + request (google.cloud.dataproc_v1.types.StartClusterRequest): + The request object. A request to start a cluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a clusters.StartClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.StartClusterRequest): + request = clusters.StartClusterRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.start_cluster] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_cluster(self, + request: clusters.DeleteClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (google.cloud.dataproc_v1.types.DeleteClusterRequest): + The request object. A request to delete a cluster. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (str): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.DeleteClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.DeleteClusterRequest): + request = clusters.DeleteClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def get_cluster(self, + request: clusters.GetClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> clusters.Cluster: + r"""Gets the resource representation for a cluster in a + project. + + Args: + request (google.cloud.dataproc_v1.types.GetClusterRequest): + The request object. Request to get the resource + representation for a cluster in a project. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (str): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.Cluster: + Describes the identifying + information, config, and status of a + cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.GetClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.GetClusterRequest): + request = clusters.GetClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_cluster] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_clusters(self, + request: clusters.ListClustersRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListClustersPager: + r"""Lists all regions/{region}/clusters in a project + alphabetically. + + Args: + request (google.cloud.dataproc_v1.types.ListClustersRequest): + The request object. A request to list the clusters in a + project. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (str): + Optional. A filter constraining the clusters to list. + Filters are case-sensitive and have the following + syntax: + + field = value [AND [field = value]] ... + + where **field** is one of ``status.state``, + ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a + label key. **value** can be ``*`` to match all values. + ``status.state`` can be one of the following: + ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, + ``ERROR``, ``DELETING``, or ``UPDATING``. 
``ACTIVE`` + contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` + states. ``INACTIVE`` contains the ``DELETING`` and + ``ERROR`` states. ``clusterName`` is the name of the + cluster provided at creation time. Only the logical + ``AND`` operator is supported; space-separated items are + treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.services.cluster_controller.pagers.ListClustersPager: + The list of all clusters in a + project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.ListClustersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.ListClustersRequest): + request = clusters.ListClustersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_clusters] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListClustersPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def diagnose_cluster(self, + request: clusters.DiagnoseClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains + `DiagnoseClusterResults `__. + + Args: + request (google.cloud.dataproc_v1.types.DiagnoseClusterRequest): + The request object. A request to collect cluster + diagnostic information. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. 
+ + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (str): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.dataproc_v1.types.DiagnoseClusterResults` + The location of diagnostic output. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.DiagnoseClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.DiagnoseClusterRequest): + request = clusters.DiagnoseClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.diagnose_cluster] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.DiagnoseClusterResults, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ClusterControllerClient", +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/pagers.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/pagers.py new file mode 100644 index 00000000..9afbfb8e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.dataproc_v1.types import clusters + + +class ListClustersPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1.types.ListClustersResponse` object, and + provides an ``__iter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1.types.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., clusters.ListClustersResponse], + request: clusters.ListClustersRequest, + response: clusters.ListClustersResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1.types.ListClustersRequest): + The initial request object. + response (google.cloud.dataproc_v1.types.ListClustersResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = clusters.ListClustersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[clusters.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[clusters.Cluster]: + for page in self.pages: + yield from page.clusters + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListClustersAsyncPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1.types.ListClustersResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1.types.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., Awaitable[clusters.ListClustersResponse]], + request: clusters.ListClustersRequest, + response: clusters.ListClustersResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1.types.ListClustersRequest): + The initial request object. + response (google.cloud.dataproc_v1.types.ListClustersResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = clusters.ListClustersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[clusters.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[clusters.Cluster]: + async def async_generator(): + async for page in self.pages: + for response in page.clusters: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py new file mode 100644 index 00000000..9c44d271 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ClusterControllerTransport +from .grpc import ClusterControllerGrpcTransport +from .grpc_asyncio import ClusterControllerGrpcAsyncIOTransport + + +# Compile a registry of transports. 
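For orientation, the string names registered below ("grpc", "grpc_asyncio") are the same names a caller can pass as the ``transport`` argument when constructing a client. A minimal usage sketch, not part of the generated files; the project, region, and cluster names are placeholders:

    from google.cloud import dataproc_v1

    # "grpc" selects the synchronous gRPC transport; the async client
    # uses the "grpc_asyncio" entry instead.
    client = dataproc_v1.ClusterControllerClient(transport="grpc")
    cluster = client.get_cluster(
        project_id="my-project",
        region="us-central1",
        cluster_name="my-cluster",
    )
    print(cluster.status.state)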
+_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterControllerTransport]] +_transport_registry['grpc'] = ClusterControllerGrpcTransport +_transport_registry['grpc_asyncio'] = ClusterControllerGrpcAsyncIOTransport + +__all__ = ( + 'ClusterControllerTransport', + 'ClusterControllerGrpcTransport', + 'ClusterControllerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py new file mode 100644 index 00000000..6e5fd590 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py @@ -0,0 +1,313 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1.types import clusters +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-dataproc', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class ClusterControllerTransport(abc.ABC): + """Abstract transport class for ClusterController.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'dataproc.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
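The mapping built below assigns each RPC its default retry and timeout configuration (for example, ``get_cluster`` retries on UNAVAILABLE, INTERNAL, and DEADLINE_EXCEEDED with a 300-second deadline). As a hedged sketch, not part of the generated files and with purely illustrative values, a caller can override both on a single invocation:

    from google.api_core import retry as retries
    from google.cloud import dataproc_v1

    client = dataproc_v1.ClusterControllerClient()
    cluster = client.get_cluster(
        project_id="my-project",      # placeholder values
        region="us-central1",
        cluster_name="my-cluster",
        retry=retries.Retry(initial=0.2, maximum=30.0, multiplier=2.0, deadline=120.0),
        timeout=60.0,
    )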
+ self._wrapped_methods = { + self.create_cluster: gapic_v1.method.wrap_method( + self.create_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + self.update_cluster: gapic_v1.method.wrap_method( + self.update_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + self.stop_cluster: gapic_v1.method.wrap_method( + self.stop_cluster, + default_timeout=None, + client_info=client_info, + ), + self.start_cluster: gapic_v1.method.wrap_method( + self.start_cluster, + default_timeout=None, + client_info=client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + self.delete_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + self.get_cluster: gapic_v1.method.wrap_method( + self.get_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + self.list_clusters: gapic_v1.method.wrap_method( + self.list_clusters, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + self.diagnose_cluster: gapic_v1.method.wrap_method( + self.diagnose_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_cluster(self) -> Callable[ + [clusters.CreateClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def update_cluster(self) -> Callable[ + [clusters.UpdateClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def stop_cluster(self) -> Callable[ + [clusters.StopClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def start_cluster(self) -> Callable[ + [clusters.StartClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_cluster(self) -> Callable[ + [clusters.DeleteClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_cluster(self) -> Callable[ + [clusters.GetClusterRequest], + Union[ + 
clusters.Cluster, + Awaitable[clusters.Cluster] + ]]: + raise NotImplementedError() + + @property + def list_clusters(self) -> Callable[ + [clusters.ListClustersRequest], + Union[ + clusters.ListClustersResponse, + Awaitable[clusters.ListClustersResponse] + ]]: + raise NotImplementedError() + + @property + def diagnose_cluster(self) -> Callable[ + [clusters.DiagnoseClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ClusterControllerTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py new file mode 100644 index 00000000..e176b377 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py @@ -0,0 +1,472 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.dataproc_v1.types import clusters +from google.longrunning import operations_pb2 # type: ignore +from .base import ClusterControllerTransport, DEFAULT_CLIENT_INFO + + +class ClusterControllerGrpcTransport(ClusterControllerTransport): + """gRPC backend transport for ClusterController. + + The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._grpc_channel = None
+ self._ssl_channel_credentials = ssl_channel_credentials
+ self._stubs: Dict[str, Callable] = {}
+ self._operations_client = None
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Ignore credentials if a channel was passed.
+ credentials = False
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+
+ else:
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
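The branch that follows builds the channel's SSL credentials from the deprecated ``client_cert_source`` callback or from application-default certificates; ``client_cert_source_for_mtls`` is handled a few lines further down. A hedged sketch of supplying a client certificate for mutual TLS, not part of the generated files; the callback and the PEM file paths are placeholders:

    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.services.cluster_controller.transports import (
        ClusterControllerGrpcTransport,
    )

    def read_client_cert():
        # Return (certificate_bytes, private_key_bytes), both PEM-encoded.
        with open("client_cert.pem", "rb") as cert, open("client_key.pem", "rb") as key:
            return cert.read(), key.read()

    transport = ClusterControllerGrpcTransport(
        client_cert_source_for_mtls=read_client_cert,
    )
    client = dataproc_v1.ClusterControllerClient(transport=transport)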
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. 
+ return self._operations_client + + @property + def create_cluster(self) -> Callable[ + [clusters.CreateClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_cluster' not in self._stubs: + self._stubs['create_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/CreateCluster', + request_serializer=clusters.CreateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_cluster'] + + @property + def update_cluster(self) -> Callable[ + [clusters.UpdateClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.UpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_cluster' not in self._stubs: + self._stubs['update_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/UpdateCluster', + request_serializer=clusters.UpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_cluster'] + + @property + def stop_cluster(self) -> Callable[ + [clusters.StopClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the stop cluster method over gRPC. + + Stops a cluster in a project. + + Returns: + Callable[[~.StopClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'stop_cluster' not in self._stubs: + self._stubs['stop_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/StopCluster', + request_serializer=clusters.StopClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['stop_cluster'] + + @property + def start_cluster(self) -> Callable[ + [clusters.StartClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the start cluster method over gRPC. + + Starts a cluster in a project. + + Returns: + Callable[[~.StartClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
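Each of these stub properties returns the raw unary callable; the client-level methods shown earlier wrap the resulting long-running operation in a future. A hedged sketch of driving one such call end to end, not part of the generated files; the names are placeholders and the cluster message is deliberately minimal rather than a deployable configuration:

    from google.cloud import dataproc_v1

    client = dataproc_v1.ClusterControllerClient()
    operation = client.create_cluster(
        project_id="my-project",                # placeholder values
        region="us-central1",
        cluster=dataproc_v1.Cluster(
            project_id="my-project",
            cluster_name="my-cluster",
        ),
    )
    cluster = operation.result(timeout=1800)    # block until the LRO completes
    print(cluster.cluster_uuid)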
+ if 'start_cluster' not in self._stubs: + self._stubs['start_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/StartCluster', + request_serializer=clusters.StartClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['start_cluster'] + + @property + def delete_cluster(self) -> Callable[ + [clusters.DeleteClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.DeleteClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_cluster' not in self._stubs: + self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/DeleteCluster', + request_serializer=clusters.DeleteClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_cluster'] + + @property + def get_cluster(self) -> Callable[ + [clusters.GetClusterRequest], + clusters.Cluster]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the resource representation for a cluster in a + project. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_cluster' not in self._stubs: + self._stubs['get_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/GetCluster', + request_serializer=clusters.GetClusterRequest.serialize, + response_deserializer=clusters.Cluster.deserialize, + ) + return self._stubs['get_cluster'] + + @property + def list_clusters(self) -> Callable[ + [clusters.ListClustersRequest], + clusters.ListClustersResponse]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all regions/{region}/clusters in a project + alphabetically. + + Returns: + Callable[[~.ListClustersRequest], + ~.ListClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/ListClusters', + request_serializer=clusters.ListClustersRequest.serialize, + response_deserializer=clusters.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def diagnose_cluster(self) -> Callable[ + [clusters.DiagnoseClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the diagnose cluster method over gRPC. + + Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. 
+ After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains + `DiagnoseClusterResults `__. + + Returns: + Callable[[~.DiagnoseClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'diagnose_cluster' not in self._stubs: + self._stubs['diagnose_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster', + request_serializer=clusters.DiagnoseClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['diagnose_cluster'] + + +__all__ = ( + 'ClusterControllerGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py new file mode 100644 index 00000000..fc9b453b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py @@ -0,0 +1,476 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1.types import clusters +from google.longrunning import operations_pb2 # type: ignore +from .base import ClusterControllerTransport, DEFAULT_CLIENT_INFO +from .grpc import ClusterControllerGrpcTransport + + +class ClusterControllerGrpcAsyncIOTransport(ClusterControllerTransport): + """gRPC AsyncIO backend transport for ClusterController. + + The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. 
+ return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_cluster(self) -> Callable[ + [clusters.CreateClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_cluster' not in self._stubs: + self._stubs['create_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/CreateCluster', + request_serializer=clusters.CreateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_cluster'] + + @property + def update_cluster(self) -> Callable[ + [clusters.UpdateClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.UpdateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_cluster' not in self._stubs: + self._stubs['update_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/UpdateCluster', + request_serializer=clusters.UpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_cluster'] + + @property + def stop_cluster(self) -> Callable[ + [clusters.StopClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the stop cluster method over gRPC. + + Stops a cluster in a project. + + Returns: + Callable[[~.StopClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
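On the AsyncIO transport the same stubs return awaitables, and the async client wraps long-running results in async operation futures. A hedged sketch, not part of the generated files, with placeholder identifiers and the request passed as a plain mapping:

    import asyncio
    from google.cloud import dataproc_v1

    async def stop_my_cluster():
        client = dataproc_v1.ClusterControllerAsyncClient()
        operation = await client.stop_cluster(request={
            "project_id": "my-project",         # placeholder values
            "region": "us-central1",
            "cluster_name": "my-cluster",
        })
        return await operation.result()

    asyncio.run(stop_my_cluster())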
+ if 'stop_cluster' not in self._stubs: + self._stubs['stop_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/StopCluster', + request_serializer=clusters.StopClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['stop_cluster'] + + @property + def start_cluster(self) -> Callable[ + [clusters.StartClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the start cluster method over gRPC. + + Starts a cluster in a project. + + Returns: + Callable[[~.StartClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'start_cluster' not in self._stubs: + self._stubs['start_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/StartCluster', + request_serializer=clusters.StartClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['start_cluster'] + + @property + def delete_cluster(self) -> Callable[ + [clusters.DeleteClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.DeleteClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_cluster' not in self._stubs: + self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/DeleteCluster', + request_serializer=clusters.DeleteClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_cluster'] + + @property + def get_cluster(self) -> Callable[ + [clusters.GetClusterRequest], + Awaitable[clusters.Cluster]]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the resource representation for a cluster in a + project. + + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_cluster' not in self._stubs: + self._stubs['get_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/GetCluster', + request_serializer=clusters.GetClusterRequest.serialize, + response_deserializer=clusters.Cluster.deserialize, + ) + return self._stubs['get_cluster'] + + @property + def list_clusters(self) -> Callable[ + [clusters.ListClustersRequest], + Awaitable[clusters.ListClustersResponse]]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all regions/{region}/clusters in a project + alphabetically. 
+ + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/ListClusters', + request_serializer=clusters.ListClustersRequest.serialize, + response_deserializer=clusters.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def diagnose_cluster(self) -> Callable[ + [clusters.DiagnoseClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the diagnose cluster method over gRPC. + + Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains + `DiagnoseClusterResults `__. + + Returns: + Callable[[~.DiagnoseClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'diagnose_cluster' not in self._stubs: + self._stubs['diagnose_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster', + request_serializer=clusters.DiagnoseClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['diagnose_cluster'] + + +__all__ = ( + 'ClusterControllerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/__init__.py new file mode 100644 index 00000000..19ac5a98 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import JobControllerClient +from .async_client import JobControllerAsyncClient + +__all__ = ( + 'JobControllerClient', + 'JobControllerAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/async_client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/async_client.py new file mode 100644 index 00000000..0fe3a2d0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/async_client.py @@ -0,0 +1,796 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1.services.job_controller import pagers +from google.cloud.dataproc_v1.types import jobs +from .transports.base import JobControllerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport +from .client import JobControllerClient + + +class JobControllerAsyncClient: + """The JobController provides methods to manage jobs.""" + + _client: JobControllerClient + + DEFAULT_ENDPOINT = JobControllerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = JobControllerClient.DEFAULT_MTLS_ENDPOINT + + common_billing_account_path = staticmethod(JobControllerClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(JobControllerClient.parse_common_billing_account_path) + common_folder_path = staticmethod(JobControllerClient.common_folder_path) + parse_common_folder_path = staticmethod(JobControllerClient.parse_common_folder_path) + common_organization_path = staticmethod(JobControllerClient.common_organization_path) + parse_common_organization_path = staticmethod(JobControllerClient.parse_common_organization_path) + common_project_path = staticmethod(JobControllerClient.common_project_path) + parse_common_project_path = staticmethod(JobControllerClient.parse_common_project_path) + common_location_path = staticmethod(JobControllerClient.common_location_path) + parse_common_location_path = staticmethod(JobControllerClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerAsyncClient: The constructed client. + """ + return JobControllerClient.from_service_account_info.__func__(JobControllerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerAsyncClient: The constructed client. + """ + return JobControllerClient.from_service_account_file.__func__(JobControllerAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> JobControllerTransport: + """Returns the transport used by the client instance. + + Returns: + JobControllerTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(JobControllerClient).get_transport_class, type(JobControllerClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, JobControllerTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the job controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.JobControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client = JobControllerClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def submit_job(self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Submits a job to a cluster. + + Args: + request (:class:`google.cloud.dataproc_v1.types.SubmitJobRequest`): + The request object. A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`google.cloud.dataproc_v1.types.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.submit_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def submit_job_as_operation(self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Submits job to a cluster. + + Args: + request (:class:`google.cloud.dataproc_v1.types.SubmitJobRequest`): + The request object. A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. 
+ + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`google.cloud.dataproc_v1.types.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.dataproc_v1.types.Job` A Dataproc + job resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.submit_job_as_operation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + jobs.Job, + metadata_type=jobs.JobMetadata, + ) + + # Done; return the response. + return response + + async def get_job(self, + request: jobs.GetJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Gets the resource representation for a job in a + project. + + Args: + request (:class:`google.cloud.dataproc_v1.types.GetJobRequest`): + The request object. A request to get the resource + representation for a job in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.GetJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_jobs(self, + request: jobs.ListJobsRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListJobsAsyncPager: + r"""Lists regions/{region}/jobs in a project. + + Args: + request (:class:`google.cloud.dataproc_v1.types.ListJobsRequest`): + The request object. A request to list jobs in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the jobs to list. + Filters are case-sensitive and have the following + syntax: + + [field = value] AND [field [= value]] ... + + where **field** is ``status.state`` or ``labels.[KEY]``, + and ``[KEY]`` is a label key. **value** can be ``*`` to + match all values. ``status.state`` can be either + ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` + operator is supported; space-separated items are treated + as having an implicit ``AND`` operator. 
+ + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.services.job_controller.pagers.ListJobsAsyncPager: + A list of jobs in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.ListJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_jobs, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_job(self, + request: jobs.UpdateJobRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Updates a job in a project. + + Args: + request (:class:`google.cloud.dataproc_v1.types.UpdateJobRequest`): + The request object. A request to update a job. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + request = jobs.UpdateJobRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. 
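+        # Because `retry` and `timeout` default to gapic_v1.method.DEFAULT,
+        # the wrapped method's default retry policy and 900s timeout apply
+        # unless the caller overrides them.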
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def cancel_job(self, + request: jobs.CancelJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Args: + request (:class:`google.cloud.dataproc_v1.types.CancelJobRequest`): + The request object. A request to cancel a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.CancelJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_job(self, + request: jobs.DeleteJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. 
+ + Args: + request (:class:`google.cloud.dataproc_v1.types.DeleteJobRequest`): + The request object. A request to delete a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.DeleteJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "JobControllerAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/client.py new file mode 100644 index 00000000..42b2cc43 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/client.py @@ -0,0 +1,927 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib  # type: ignore
+from google.api_core import exceptions as core_exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.api_core import operation  # type: ignore
+from google.api_core import operation_async  # type: ignore
+from google.cloud.dataproc_v1.services.job_controller import pagers
+from google.cloud.dataproc_v1.types import jobs
+from .transports.base import JobControllerTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import JobControllerGrpcTransport
+from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport
+
+
+class JobControllerClientMeta(type):
+    """Metaclass for the JobController client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[JobControllerTransport]]
+    _transport_registry["grpc"] = JobControllerGrpcTransport
+    _transport_registry["grpc_asyncio"] = JobControllerGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+        ) -> Type[JobControllerTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class JobControllerClient(metaclass=JobControllerClientMeta):
+    """The JobController provides methods to manage jobs."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "dataproc.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            JobControllerClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            JobControllerClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> JobControllerTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            JobControllerTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, JobControllerTransport, None]
= None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the job controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, JobControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. 
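+        # A caller-supplied transport instance is used as-is; it must already
+        # carry its own credentials and scopes, which is validated below.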
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, JobControllerTransport): + # transport is a JobControllerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def submit_job(self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Submits a job to a cluster. + + Args: + request (google.cloud.dataproc_v1.types.SubmitJobRequest): + The request object. A request to submit a job. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (google.cloud.dataproc_v1.types.Job): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.SubmitJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.SubmitJobRequest): + request = jobs.SubmitJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.submit_job] + + # Send the request. 
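+        # The wrapped method applies the transport's default retry policy and
+        # timeout unless the caller overrides them on this call.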
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def submit_job_as_operation(self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Submits job to a cluster. + + Args: + request (google.cloud.dataproc_v1.types.SubmitJobRequest): + The request object. A request to submit a job. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (google.cloud.dataproc_v1.types.Job): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.dataproc_v1.types.Job` A Dataproc + job resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.SubmitJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.SubmitJobRequest): + request = jobs.SubmitJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.submit_job_as_operation] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + jobs.Job, + metadata_type=jobs.JobMetadata, + ) + + # Done; return the response. + return response + + def get_job(self, + request: jobs.GetJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Gets the resource representation for a job in a + project. 
+ + Args: + request (google.cloud.dataproc_v1.types.GetJobRequest): + The request object. A request to get the resource + representation for a job in a project. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (str): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.GetJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.GetJobRequest): + request = jobs.GetJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_job] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_jobs(self, + request: jobs.ListJobsRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListJobsPager: + r"""Lists regions/{region}/jobs in a project. + + Args: + request (google.cloud.dataproc_v1.types.ListJobsRequest): + The request object. A request to list jobs in a project. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (str): + Optional. A filter constraining the jobs to list. + Filters are case-sensitive and have the following + syntax: + + [field = value] AND [field [= value]] ... 
+ + where **field** is ``status.state`` or ``labels.[KEY]``, + and ``[KEY]`` is a label key. **value** can be ``*`` to + match all values. ``status.state`` can be either + ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` + operator is supported; space-separated items are treated + as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.services.job_controller.pagers.ListJobsPager: + A list of jobs in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.ListJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.ListJobsRequest): + request = jobs.ListJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_jobs] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_job(self, + request: jobs.UpdateJobRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Updates a job in a project. + + Args: + request (google.cloud.dataproc_v1.types.UpdateJobRequest): + The request object. A request to update a job. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a jobs.UpdateJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
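+        # update_job exposes no flattened field arguments, so the request is
+        # only coerced to an UpdateJobRequest (if needed) and sent unchanged.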
+ if not isinstance(request, jobs.UpdateJobRequest): + request = jobs.UpdateJobRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_job] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def cancel_job(self, + request: jobs.CancelJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Args: + request (google.cloud.dataproc_v1.types.CancelJobRequest): + The request object. A request to cancel a job. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (str): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.CancelJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.CancelJobRequest): + request = jobs.CancelJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_job] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_job(self, + request: jobs.DeleteJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes the job from the project. 
If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Args: + request (google.cloud.dataproc_v1.types.DeleteJobRequest): + The request object. A request to delete a job. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (str): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.DeleteJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.DeleteJobRequest): + request = jobs.DeleteJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_job] + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "JobControllerClient", +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/pagers.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/pagers.py new file mode 100644 index 00000000..0143144a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.dataproc_v1.types import jobs + + +class ListJobsPager: + """A pager for iterating through ``list_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1.types.ListJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListJobs`` requests and continue to iterate + through the ``jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1.types.ListJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., jobs.ListJobsResponse], + request: jobs.ListJobsRequest, + response: jobs.ListJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1.types.ListJobsRequest): + The initial request object. + response (google.cloud.dataproc_v1.types.ListJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = jobs.ListJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[jobs.ListJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[jobs.Job]: + for page in self.pages: + yield from page.jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListJobsAsyncPager: + """A pager for iterating through ``list_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1.types.ListJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListJobs`` requests and continue to iterate + through the ``jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1.types.ListJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[jobs.ListJobsResponse]], + request: jobs.ListJobsRequest, + response: jobs.ListJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1.types.ListJobsRequest): + The initial request object. + response (google.cloud.dataproc_v1.types.ListJobsResponse): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = jobs.ListJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[jobs.ListJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[jobs.Job]: + async def async_generator(): + async for page in self.pages: + for response in page.jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py new file mode 100644 index 00000000..b35119f2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import JobControllerTransport +from .grpc import JobControllerGrpcTransport +from .grpc_asyncio import JobControllerGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[JobControllerTransport]] +_transport_registry['grpc'] = JobControllerGrpcTransport +_transport_registry['grpc_asyncio'] = JobControllerGrpcAsyncIOTransport + +__all__ = ( + 'JobControllerTransport', + 'JobControllerGrpcTransport', + 'JobControllerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/base.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/base.py new file mode 100644 index 00000000..f9ccafad --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/base.py @@ -0,0 +1,308 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
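For the async pager above, iteration follows the usual asyncio pattern; this sketch assumes the ``JobControllerAsyncClient`` generated alongside the synchronous client (not shown in this hunk) and placeholder project values:

    import asyncio
    from google.cloud import dataproc_v1

    async def list_active_jobs():
        client = dataproc_v1.JobControllerAsyncClient()
        # The first page is fetched by the awaited call; __aiter__ then pulls
        # the remaining pages as they are needed.
        pager = await client.list_jobs(project_id="my-project", region="us-central1")
        async for job in pager:
            print(job.reference.job_id)

    asyncio.run(list_active_jobs())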
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1.types import jobs +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-dataproc', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class JobControllerTransport(abc.ABC): + """Abstract transport class for JobController.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'dataproc.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.submit_job: gapic_v1.method.wrap_method( + self.submit_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.submit_job_as_operation: gapic_v1.method.wrap_method( + self.submit_job_as_operation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.get_job: gapic_v1.method.wrap_method( + self.get_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.list_jobs: gapic_v1.method.wrap_method( + self.list_jobs, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.update_job: gapic_v1.method.wrap_method( + self.update_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.cancel_job: gapic_v1.method.wrap_method( + self.cancel_job, + default_retry=retries.Retry( 
+initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.delete_job: gapic_v1.method.wrap_method( + self.delete_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def submit_job(self) -> Callable[ + [jobs.SubmitJobRequest], + Union[ + jobs.Job, + Awaitable[jobs.Job] + ]]: + raise NotImplementedError() + + @property + def submit_job_as_operation(self) -> Callable[ + [jobs.SubmitJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_job(self) -> Callable[ + [jobs.GetJobRequest], + Union[ + jobs.Job, + Awaitable[jobs.Job] + ]]: + raise NotImplementedError() + + @property + def list_jobs(self) -> Callable[ + [jobs.ListJobsRequest], + Union[ + jobs.ListJobsResponse, + Awaitable[jobs.ListJobsResponse] + ]]: + raise NotImplementedError() + + @property + def update_job(self) -> Callable[ + [jobs.UpdateJobRequest], + Union[ + jobs.Job, + Awaitable[jobs.Job] + ]]: + raise NotImplementedError() + + @property + def cancel_job(self) -> Callable[ + [jobs.CancelJobRequest], + Union[ + jobs.Job, + Awaitable[jobs.Job] + ]]: + raise NotImplementedError() + + @property + def delete_job(self) -> Callable[ + [jobs.DeleteJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'JobControllerTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py new file mode 100644 index 00000000..8a46774c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py @@ -0,0 +1,434 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
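Callers can override the generated retry policy shown above on a per-call basis; this sketch mirrors those defaults (0.1 s initial back-off, growing 1.3x up to 60 s) with a tighter overall deadline, using placeholder project values:

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries
    from google.cloud import dataproc_v1

    client = dataproc_v1.JobControllerClient()
    custom_retry = retries.Retry(
        initial=0.1, maximum=60.0, multiplier=1.3,
        deadline=300.0,  # tighter than the generated 900 s default
        predicate=retries.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.InternalServerError,
            core_exceptions.ServiceUnavailable,
        ),
    )
    # Every generated method also accepts ``timeout=`` and ``metadata=``.
    pager = client.list_jobs(
        project_id="my-project", region="us-central1",
        retry=custom_retry, timeout=120.0,
    )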
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.dataproc_v1.types import jobs +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import JobControllerTransport, DEFAULT_CLIENT_INFO + + +class JobControllerGrpcTransport(JobControllerTransport): + """gRPC backend transport for JobController. + + The JobController provides methods to manage jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def submit_job(self) -> Callable[ + [jobs.SubmitJobRequest], + jobs.Job]: + r"""Return a callable for the submit job method over gRPC. + + Submits a job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'submit_job' not in self._stubs: + self._stubs['submit_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/SubmitJob', + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['submit_job'] + + @property + def submit_job_as_operation(self) -> Callable[ + [jobs.SubmitJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the submit job as operation method over gRPC. + + Submits job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'submit_job_as_operation' not in self._stubs: + self._stubs['submit_job_as_operation'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/SubmitJobAsOperation', + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['submit_job_as_operation'] + + @property + def get_job(self) -> Callable[ + [jobs.GetJobRequest], + jobs.Job]: + r"""Return a callable for the get job method over gRPC. + + Gets the resource representation for a job in a + project. + + Returns: + Callable[[~.GetJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. 
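A sketch of wiring the gRPC transport above to an explicitly created channel; the quota project is a placeholder, and handing the transport to ``JobControllerClient`` assumes the client constructor defined elsewhere in this package:

    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.services.job_controller.transports import (
        JobControllerGrpcTransport,
    )

    # Build the channel explicitly (for example, to reuse it or tune options).
    # As noted above, credentials passed alongside an explicit channel are ignored.
    channel = JobControllerGrpcTransport.create_channel(
        "dataproc.googleapis.com",
        quota_project_id="my-billing-project",  # placeholder
    )
    transport = JobControllerGrpcTransport(channel=channel)
    client = dataproc_v1.JobControllerClient(transport=transport)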
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_job' not in self._stubs: + self._stubs['get_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/GetJob', + request_serializer=jobs.GetJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['get_job'] + + @property + def list_jobs(self) -> Callable[ + [jobs.ListJobsRequest], + jobs.ListJobsResponse]: + r"""Return a callable for the list jobs method over gRPC. + + Lists regions/{region}/jobs in a project. + + Returns: + Callable[[~.ListJobsRequest], + ~.ListJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_jobs' not in self._stubs: + self._stubs['list_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/ListJobs', + request_serializer=jobs.ListJobsRequest.serialize, + response_deserializer=jobs.ListJobsResponse.deserialize, + ) + return self._stubs['list_jobs'] + + @property + def update_job(self) -> Callable[ + [jobs.UpdateJobRequest], + jobs.Job]: + r"""Return a callable for the update job method over gRPC. + + Updates a job in a project. + + Returns: + Callable[[~.UpdateJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_job' not in self._stubs: + self._stubs['update_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/UpdateJob', + request_serializer=jobs.UpdateJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['update_job'] + + @property + def cancel_job(self) -> Callable[ + [jobs.CancelJobRequest], + jobs.Job]: + r"""Return a callable for the cancel job method over gRPC. + + Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Returns: + Callable[[~.CancelJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_job' not in self._stubs: + self._stubs['cancel_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/CancelJob', + request_serializer=jobs.CancelJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['cancel_job'] + + @property + def delete_job(self) -> Callable[ + [jobs.DeleteJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete job method over gRPC. + + Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Returns: + Callable[[~.DeleteJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_job' not in self._stubs: + self._stubs['delete_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/DeleteJob', + request_serializer=jobs.DeleteJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_job'] + + +__all__ = ( + 'JobControllerGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py new file mode 100644 index 00000000..3c979b2c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py @@ -0,0 +1,438 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1.types import jobs +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import JobControllerTransport, DEFAULT_CLIENT_INFO +from .grpc import JobControllerGrpcTransport + + +class JobControllerGrpcAsyncIOTransport(JobControllerTransport): + """gRPC AsyncIO backend transport for JobController. + + The JobController provides methods to manage jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def submit_job(self) -> Callable[ + [jobs.SubmitJobRequest], + Awaitable[jobs.Job]]: + r"""Return a callable for the submit job method over gRPC. + + Submits a job to a cluster. 
+ + Returns: + Callable[[~.SubmitJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'submit_job' not in self._stubs: + self._stubs['submit_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/SubmitJob', + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['submit_job'] + + @property + def submit_job_as_operation(self) -> Callable[ + [jobs.SubmitJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the submit job as operation method over gRPC. + + Submits job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'submit_job_as_operation' not in self._stubs: + self._stubs['submit_job_as_operation'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/SubmitJobAsOperation', + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['submit_job_as_operation'] + + @property + def get_job(self) -> Callable[ + [jobs.GetJobRequest], + Awaitable[jobs.Job]]: + r"""Return a callable for the get job method over gRPC. + + Gets the resource representation for a job in a + project. + + Returns: + Callable[[~.GetJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_job' not in self._stubs: + self._stubs['get_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/GetJob', + request_serializer=jobs.GetJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['get_job'] + + @property + def list_jobs(self) -> Callable[ + [jobs.ListJobsRequest], + Awaitable[jobs.ListJobsResponse]]: + r"""Return a callable for the list jobs method over gRPC. + + Lists regions/{region}/jobs in a project. + + Returns: + Callable[[~.ListJobsRequest], + Awaitable[~.ListJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_jobs' not in self._stubs: + self._stubs['list_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/ListJobs', + request_serializer=jobs.ListJobsRequest.serialize, + response_deserializer=jobs.ListJobsResponse.deserialize, + ) + return self._stubs['list_jobs'] + + @property + def update_job(self) -> Callable[ + [jobs.UpdateJobRequest], + Awaitable[jobs.Job]]: + r"""Return a callable for the update job method over gRPC. + + Updates a job in a project. 
+ + Returns: + Callable[[~.UpdateJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_job' not in self._stubs: + self._stubs['update_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/UpdateJob', + request_serializer=jobs.UpdateJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['update_job'] + + @property + def cancel_job(self) -> Callable[ + [jobs.CancelJobRequest], + Awaitable[jobs.Job]]: + r"""Return a callable for the cancel job method over gRPC. + + Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Returns: + Callable[[~.CancelJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_job' not in self._stubs: + self._stubs['cancel_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/CancelJob', + request_serializer=jobs.CancelJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['cancel_job'] + + @property + def delete_job(self) -> Callable[ + [jobs.DeleteJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete job method over gRPC. + + Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Returns: + Callable[[~.DeleteJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_job' not in self._stubs: + self._stubs['delete_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.JobController/DeleteJob', + request_serializer=jobs.DeleteJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_job'] + + +__all__ = ( + 'JobControllerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/__init__.py new file mode 100644 index 00000000..1dd621e9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import WorkflowTemplateServiceClient +from .async_client import WorkflowTemplateServiceAsyncClient + +__all__ = ( + 'WorkflowTemplateServiceClient', + 'WorkflowTemplateServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py new file mode 100644 index 00000000..55f3a1e9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py @@ -0,0 +1,945 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1.services.workflow_template_service import pagers +from google.cloud.dataproc_v1.types import workflow_templates +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport +from .client import WorkflowTemplateServiceClient + + +class WorkflowTemplateServiceAsyncClient: + """The API interface for managing Workflow Templates in the + Dataproc API. 
+ """ + + _client: WorkflowTemplateServiceClient + + DEFAULT_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_MTLS_ENDPOINT + + cluster_path = staticmethod(WorkflowTemplateServiceClient.cluster_path) + parse_cluster_path = staticmethod(WorkflowTemplateServiceClient.parse_cluster_path) + service_path = staticmethod(WorkflowTemplateServiceClient.service_path) + parse_service_path = staticmethod(WorkflowTemplateServiceClient.parse_service_path) + workflow_template_path = staticmethod(WorkflowTemplateServiceClient.workflow_template_path) + parse_workflow_template_path = staticmethod(WorkflowTemplateServiceClient.parse_workflow_template_path) + common_billing_account_path = staticmethod(WorkflowTemplateServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(WorkflowTemplateServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(WorkflowTemplateServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(WorkflowTemplateServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(WorkflowTemplateServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(WorkflowTemplateServiceClient.parse_common_organization_path) + common_project_path = staticmethod(WorkflowTemplateServiceClient.common_project_path) + parse_common_project_path = staticmethod(WorkflowTemplateServiceClient.parse_common_project_path) + common_location_path = staticmethod(WorkflowTemplateServiceClient.common_location_path) + parse_common_location_path = staticmethod(WorkflowTemplateServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceAsyncClient: The constructed client. + """ + return WorkflowTemplateServiceClient.from_service_account_info.__func__(WorkflowTemplateServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceAsyncClient: The constructed client. + """ + return WorkflowTemplateServiceClient.from_service_account_file.__func__(WorkflowTemplateServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> WorkflowTemplateServiceTransport: + """Returns the transport used by the client instance. + + Returns: + WorkflowTemplateServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial(type(WorkflowTemplateServiceClient).get_transport_class, type(WorkflowTemplateServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, WorkflowTemplateServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the workflow template service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.WorkflowTemplateServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = WorkflowTemplateServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_workflow_template(self, + request: workflow_templates.CreateWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Creates new workflow template. + + Args: + request (:class:`google.cloud.dataproc_v1.types.CreateWorkflowTemplateRequest`): + The request object. A request to create a workflow + template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`google.cloud.dataproc_v1.types.WorkflowTemplate`): + Required. The Dataproc workflow + template to create. 
+ + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.CreateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_workflow_template(self, + request: workflow_templates.GetWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Args: + request (:class:`google.cloud.dataproc_v1.types.GetWorkflowTemplateRequest`): + The request object. A request to fetch a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.dataproc_v1.types.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.GetWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def instantiate_workflow_template(self, + request: workflow_templates.InstantiateWorkflowTemplateRequest = None, + *, + name: str = None, + parameters: Sequence[workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest`): + The request object. A request to instantiate a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For + ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`Sequence[google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest.ParametersEntry]`): + Optional. Map from parameter names to + values that should be used for those + parameters. Values may not exceed 1000 + characters. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, parameters]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.InstantiateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + if parameters: + request.parameters.update(parameters) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.instantiate_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. 
+ return response + + async def instantiate_inline_workflow_template(self, + request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`google.cloud.dataproc_v1.types.InstantiateInlineWorkflowTemplateRequest`): + The request object. A request to instantiate an inline + workflow template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`google.cloud.dataproc_v1.types.WorkflowTemplate`): + Required. The workflow template to + instantiate. + + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.instantiate_inline_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + async def update_workflow_template(self, + request: workflow_templates.UpdateWorkflowTemplateRequest = None, + *, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Args: + request (:class:`google.cloud.dataproc_v1.types.UpdateWorkflowTemplateRequest`): + The request object. A request to update a workflow + template. + template (:class:`google.cloud.dataproc_v1.types.WorkflowTemplate`): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([template]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.UpdateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("template.name", request.template.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_workflow_templates(self, + request: workflow_templates.ListWorkflowTemplatesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListWorkflowTemplatesAsyncPager: + r"""Lists workflows that match the specified filter in + the request. + + Args: + request (:class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest`): + The request object. A request to list workflow templates + in a project. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.services.workflow_template_service.pagers.ListWorkflowTemplatesAsyncPager: + A response to a request to list + workflow templates in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.ListWorkflowTemplatesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.list_workflow_templates,
+            default_retry=retries.Retry(
+initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
+                    core_exceptions.DeadlineExceeded,
+                    core_exceptions.InternalServerError,
+                    core_exceptions.ServiceUnavailable,
+                ),
+                deadline=600.0,
+            ),
+            default_timeout=600.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__aiter__` convenience method.
+        response = pagers.ListWorkflowTemplatesAsyncPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def delete_workflow_template(self,
+            request: workflow_templates.DeleteWorkflowTemplateRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> None:
+        r"""Deletes a workflow template. It does not cancel in-
+        progress workflows.
+
+        Args:
+            request (:class:`google.cloud.dataproc_v1.types.DeleteWorkflowTemplateRequest`):
+                The request object. A request to delete a workflow
+                template.
+                Currently started workflows will remain running.
+            name (:class:`str`):
+                Required. The resource name of the workflow template, as
+                described in
+                https://cloud.google.com/apis/design/resource_names.
+
+                - For ``projects.regions.workflowTemplates.delete``,
+                  the resource name of the template has the following
+                  format:
+                  ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}``
+
+                - For
+                  ``projects.locations.workflowTemplates.instantiate``,
+                  the resource name of the template has the following
+                  format:
+                  ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = workflow_templates.DeleteWorkflowTemplateRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "WorkflowTemplateServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/client.py new file mode 100644 index 00000000..7713f026 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/client.py @@ -0,0 +1,1103 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1.services.workflow_template_service import pagers +from google.cloud.dataproc_v1.types import workflow_templates +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import WorkflowTemplateServiceGrpcTransport +from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport + + +class WorkflowTemplateServiceClientMeta(type): + """Metaclass for the WorkflowTemplateService client. 
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[WorkflowTemplateServiceTransport]]
+    _transport_registry["grpc"] = WorkflowTemplateServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = WorkflowTemplateServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+        ) -> Type[WorkflowTemplateServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class WorkflowTemplateServiceClient(metaclass=WorkflowTemplateServiceClientMeta):
+    """The API interface for managing Workflow Templates in the
+    Dataproc API.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "dataproc.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            WorkflowTemplateServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            WorkflowTemplateServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> WorkflowTemplateServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            WorkflowTemplateServiceTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def cluster_path(project: str,location: str,cluster: str,) -> str:
+        """Returns a fully-qualified cluster string."""
+        return "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, )
+
+    @staticmethod
+    def parse_cluster_path(path: str) -> Dict[str,str]:
+        """Parses a cluster path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/clusters/(?P<cluster>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def service_path(project: str,location: str,service: str,) -> str:
+        """Returns a fully-qualified service string."""
+        return "projects/{project}/locations/{location}/services/{service}".format(project=project, location=location, service=service, )
+
+    @staticmethod
+    def parse_service_path(path: str) -> Dict[str,str]:
+        """Parses a service path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/services/(?P<service>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def workflow_template_path(project: str,region: str,workflow_template: str,) -> str:
+        """Returns a fully-qualified workflow_template string."""
+        return "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format(project=project, region=region, workflow_template=workflow_template, )
+
+    @staticmethod
+    def parse_workflow_template_path(path: str) -> Dict[str,str]:
+        """Parses a workflow_template path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/regions/(?P<region>.+?)/workflowTemplates/(?P<workflow_template>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, WorkflowTemplateServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the workflow template service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, WorkflowTemplateServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        if isinstance(client_options, dict):
+            client_options = client_options_lib.from_dict(client_options)
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+
+        # Create SSL credentials for mutual TLS if needed.
+        use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
+
+        client_cert_source_func = None
+        is_mtls = False
+        if use_client_cert:
+            if client_options.client_cert_source:
+                is_mtls = True
+                client_cert_source_func = client_options.client_cert_source
+            else:
+                is_mtls = mtls.has_default_client_cert_source()
+                if is_mtls:
+                    client_cert_source_func = mtls.default_client_cert_source()
+                else:
+                    client_cert_source_func = None
+
+        # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, WorkflowTemplateServiceTransport): + # transport is a WorkflowTemplateServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_workflow_template(self, + request: workflow_templates.CreateWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Creates new workflow template. + + Args: + request (google.cloud.dataproc_v1.types.CreateWorkflowTemplateRequest): + The request object. A request to create a workflow + template. + parent (str): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (google.cloud.dataproc_v1.types.WorkflowTemplate): + Required. The Dataproc workflow + template to create. + + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.CreateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.CreateWorkflowTemplateRequest): + request = workflow_templates.CreateWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_workflow_template(self, + request: workflow_templates.GetWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Args: + request (google.cloud.dataproc_v1.types.GetWorkflowTemplateRequest): + The request object. A request to fetch a workflow + template. + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.GetWorkflowTemplateRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.GetWorkflowTemplateRequest): + request = workflow_templates.GetWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def instantiate_workflow_template(self, + request: workflow_templates.InstantiateWorkflowTemplateRequest = None, + *, + name: str = None, + parameters: Sequence[workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest): + The request object. A request to instantiate a workflow + template. + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (Sequence[google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): + Optional. Map from parameter names to + values that should be used for those + parameters. Values may not exceed 1000 + characters. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, parameters]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.InstantiateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.InstantiateWorkflowTemplateRequest): + request = workflow_templates.InstantiateWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if parameters is not None: + request.parameters = parameters + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.instantiate_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + def instantiate_inline_workflow_template(self, + request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. 
+ + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (google.cloud.dataproc_v1.types.InstantiateInlineWorkflowTemplateRequest): + The request object. A request to instantiate an inline + workflow template. + parent (str): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (google.cloud.dataproc_v1.types.WorkflowTemplate): + Required. The workflow template to + instantiate. + + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.InstantiateInlineWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.InstantiateInlineWorkflowTemplateRequest): + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.instantiate_inline_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + def update_workflow_template(self, + request: workflow_templates.UpdateWorkflowTemplateRequest = None, + *, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Args: + request (google.cloud.dataproc_v1.types.UpdateWorkflowTemplateRequest): + The request object. A request to update a workflow + template. + template (google.cloud.dataproc_v1.types.WorkflowTemplate): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([template]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.UpdateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.UpdateWorkflowTemplateRequest): + request = workflow_templates.UpdateWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("template.name", request.template.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
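# A minimal usage sketch (not part of the generated module) for the
# update_workflow_template call documented above: read the current template,
# modify it, and write it back while leaving `version` untouched so the server
# can reject concurrent edits. The resource name and label are hypothetical.
from google.cloud import dataproc_v1

client = dataproc_v1.WorkflowTemplateServiceClient()

name = "projects/my-project/regions/us-central1/workflowTemplates/sketch-template"
template = client.get_workflow_template(name=name)

template.labels["owner"] = "data-team"  # example edit; version stays as fetched

updated = client.update_workflow_template(template=template)
print(updated.version)  # the server increments the version on success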
+ return response + + def list_workflow_templates(self, + request: workflow_templates.ListWorkflowTemplatesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListWorkflowTemplatesPager: + r"""Lists workflows that match the specified filter in + the request. + + Args: + request (google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest): + The request object. A request to list workflow templates + in a project. + parent (str): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.services.workflow_template_service.pagers.ListWorkflowTemplatesPager: + A response to a request to list + workflow templates in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.ListWorkflowTemplatesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.ListWorkflowTemplatesRequest): + request = workflow_templates.ListWorkflowTemplatesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_workflow_templates] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListWorkflowTemplatesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
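# A minimal usage sketch (not part of the generated module) showing how the
# ListWorkflowTemplatesPager returned above is consumed; the parent value is a
# hypothetical placeholder.
from google.cloud import dataproc_v1

client = dataproc_v1.WorkflowTemplateServiceClient()

pager = client.list_workflow_templates(
    parent="projects/my-project/regions/us-central1",
)

# Iterating the pager yields WorkflowTemplate messages and fetches further
# pages automatically whenever next_page_token is set on a response.
for template in pager:
    print(template.name, template.version)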
+ return response + + def delete_workflow_template(self, + request: workflow_templates.DeleteWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a workflow template. It does not cancel in-progress workflows. + + Args: + request (google.cloud.dataproc_v1.types.DeleteWorkflowTemplateRequest): + The request object. A request to delete a workflow + template. + Currently started workflows will remain running. + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.delete``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.delete``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.DeleteWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.DeleteWorkflowTemplateRequest): + request = workflow_templates.DeleteWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request.
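# A minimal usage sketch (not part of the generated module) for the
# delete_workflow_template call documented above. The call returns None, and,
# per the docstring, workflows already started from the template keep running.
# The resource name is a hypothetical placeholder.
from google.cloud import dataproc_v1

client = dataproc_v1.WorkflowTemplateServiceClient()
client.delete_workflow_template(
    name="projects/my-project/regions/us-central1/workflowTemplates/sketch-template",
)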
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "WorkflowTemplateServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py new file mode 100644 index 00000000..2da3d2c9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.dataproc_v1.types import workflow_templates + + +class ListWorkflowTemplatesPager: + """A pager for iterating through ``list_workflow_templates`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``templates`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListWorkflowTemplates`` requests and continue to iterate + through the ``templates`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., workflow_templates.ListWorkflowTemplatesResponse], + request: workflow_templates.ListWorkflowTemplatesRequest, + response: workflow_templates.ListWorkflowTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest): + The initial request object. + response (google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = workflow_templates.ListWorkflowTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[workflow_templates.ListWorkflowTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[workflow_templates.WorkflowTemplate]: + for page in self.pages: + yield from page.templates + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListWorkflowTemplatesAsyncPager: + """A pager for iterating through ``list_workflow_templates`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``templates`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListWorkflowTemplates`` requests and continue to iterate + through the ``templates`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[workflow_templates.ListWorkflowTemplatesResponse]], + request: workflow_templates.ListWorkflowTemplatesRequest, + response: workflow_templates.ListWorkflowTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest): + The initial request object. + response (google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = workflow_templates.ListWorkflowTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[workflow_templates.ListWorkflowTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[workflow_templates.WorkflowTemplate]: + async def async_generator(): + async for page in self.pages: + for response in page.templates: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py new file mode 100644 index 00000000..96efd4cb --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import WorkflowTemplateServiceTransport +from .grpc import WorkflowTemplateServiceGrpcTransport +from .grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[WorkflowTemplateServiceTransport]] +_transport_registry['grpc'] = WorkflowTemplateServiceGrpcTransport +_transport_registry['grpc_asyncio'] = WorkflowTemplateServiceGrpcAsyncIOTransport + +__all__ = ( + 'WorkflowTemplateServiceTransport', + 'WorkflowTemplateServiceGrpcTransport', + 'WorkflowTemplateServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py new file mode 100644 index 00000000..fef8a855 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py @@ -0,0 +1,306 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1.types import workflow_templates +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-dataproc', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class WorkflowTemplateServiceTransport(abc.ABC): + """Abstract transport class for WorkflowTemplateService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'dataproc.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
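# A small sketch (not part of the generated module) of the defaults handled by
# the transport constructor above: a host without a port gains ":443", and
# AUTH_SCOPES supplies the cloud-platform scope when callers pass scopes=None.
from google.cloud.dataproc_v1.services.workflow_template_service.transports.base import (
    WorkflowTemplateServiceTransport,
)

host = WorkflowTemplateServiceTransport.DEFAULT_HOST
if ':' not in host:
    host += ':443'
print(host)                                          # dataproc.googleapis.com:443
print(WorkflowTemplateServiceTransport.AUTH_SCOPES)  # ('https://www.googleapis.com/auth/cloud-platform',)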
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_workflow_template: gapic_v1.method.wrap_method( + self.create_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.get_workflow_template: gapic_v1.method.wrap_method( + self.get_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.instantiate_workflow_template: gapic_v1.method.wrap_method( + self.instantiate_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.instantiate_inline_workflow_template: gapic_v1.method.wrap_method( + self.instantiate_inline_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.update_workflow_template: gapic_v1.method.wrap_method( + self.update_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.list_workflow_templates: 
gapic_v1.method.wrap_method( + self.list_workflow_templates, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.delete_workflow_template: gapic_v1.method.wrap_method( + self.delete_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_workflow_template(self) -> Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + Union[ + workflow_templates.WorkflowTemplate, + Awaitable[workflow_templates.WorkflowTemplate] + ]]: + raise NotImplementedError() + + @property + def get_workflow_template(self) -> Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + Union[ + workflow_templates.WorkflowTemplate, + Awaitable[workflow_templates.WorkflowTemplate] + ]]: + raise NotImplementedError() + + @property + def instantiate_workflow_template(self) -> Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def instantiate_inline_workflow_template(self) -> Callable[ + [workflow_templates.InstantiateInlineWorkflowTemplateRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def update_workflow_template(self) -> Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + Union[ + workflow_templates.WorkflowTemplate, + Awaitable[workflow_templates.WorkflowTemplate] + ]]: + raise NotImplementedError() + + @property + def list_workflow_templates(self) -> Callable[ + [workflow_templates.ListWorkflowTemplatesRequest], + Union[ + workflow_templates.ListWorkflowTemplatesResponse, + Awaitable[workflow_templates.ListWorkflowTemplatesResponse] + ]]: + raise NotImplementedError() + + @property + def delete_workflow_template(self) -> Callable[ + [workflow_templates.DeleteWorkflowTemplateRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'WorkflowTemplateServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py new file mode 100644 index 00000000..c4f896aa --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py @@ -0,0 +1,481 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.dataproc_v1.types import workflow_templates +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO + + +class WorkflowTemplateServiceGrpcTransport(WorkflowTemplateServiceTransport): + """gRPC backend transport for WorkflowTemplateService. + + The API interface for managing Workflow Templates in the + Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format.
It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. 
+ Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_workflow_template(self) -> Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate]: + r"""Return a callable for the create workflow template method over gRPC. + + Creates new workflow template. + + Returns: + Callable[[~.CreateWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_workflow_template' not in self._stubs: + self._stubs['create_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/CreateWorkflowTemplate', + request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs['create_workflow_template'] + + @property + def get_workflow_template(self) -> Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate]: + r"""Return a callable for the get workflow template method over gRPC. + + Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Returns: + Callable[[~.GetWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_workflow_template' not in self._stubs: + self._stubs['get_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/GetWorkflowTemplate', + request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs['get_workflow_template'] + + @property + def instantiate_workflow_template(self) -> Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], + operations_pb2.Operation]: + r"""Return a callable for the instantiate workflow template method over gRPC. + + Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateWorkflowTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'instantiate_workflow_template' not in self._stubs: + self._stubs['instantiate_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateWorkflowTemplate', + request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['instantiate_workflow_template'] + + @property + def instantiate_inline_workflow_template(self) -> Callable[ + [workflow_templates.InstantiateInlineWorkflowTemplateRequest], + operations_pb2.Operation]: + r"""Return a callable for the instantiate inline workflow + template method over gRPC. + + Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. 
+ + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateInlineWorkflowTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'instantiate_inline_workflow_template' not in self._stubs: + self._stubs['instantiate_inline_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateInlineWorkflowTemplate', + request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['instantiate_inline_workflow_template'] + + @property + def update_workflow_template(self) -> Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate]: + r"""Return a callable for the update workflow template method over gRPC. + + Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Returns: + Callable[[~.UpdateWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_workflow_template' not in self._stubs: + self._stubs['update_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/UpdateWorkflowTemplate', + request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs['update_workflow_template'] + + @property + def list_workflow_templates(self) -> Callable[ + [workflow_templates.ListWorkflowTemplatesRequest], + workflow_templates.ListWorkflowTemplatesResponse]: + r"""Return a callable for the list workflow templates method over gRPC. + + Lists workflows that match the specified filter in + the request. + + Returns: + Callable[[~.ListWorkflowTemplatesRequest], + ~.ListWorkflowTemplatesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_workflow_templates' not in self._stubs: + self._stubs['list_workflow_templates'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/ListWorkflowTemplates', + request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize, + response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize, + ) + return self._stubs['list_workflow_templates'] + + @property + def delete_workflow_template(self) -> Callable[ + [workflow_templates.DeleteWorkflowTemplateRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete workflow template method over gRPC. 
+ + Deletes a workflow template. It does not cancel in-progress workflows. + + Returns: + Callable[[~.DeleteWorkflowTemplateRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_workflow_template' not in self._stubs: + self._stubs['delete_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/DeleteWorkflowTemplate', + request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_workflow_template'] + + +__all__ = ( + 'WorkflowTemplateServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..6783c33d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py @@ -0,0 +1,485 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1.types import workflow_templates +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import WorkflowTemplateServiceGrpcTransport + + +class WorkflowTemplateServiceGrpcAsyncIOTransport(WorkflowTemplateServiceTransport): + """gRPC AsyncIO backend transport for WorkflowTemplateService. + + The API interface for managing Workflow Templates in the + Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed.
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated.
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. 
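# A small sketch (not part of the generated module) of the channel-reuse path
# described above: when a pre-built aio.Channel is passed in, credentials are
# ignored and that same channel is what the cached grpc_channel property
# returns. Assumes application default credentials for create_channel.
from google.cloud.dataproc_v1.services.workflow_template_service.transports import (
    WorkflowTemplateServiceGrpcAsyncIOTransport,
)

channel = WorkflowTemplateServiceGrpcAsyncIOTransport.create_channel(
    "dataproc.googleapis.com:443",
)
transport = WorkflowTemplateServiceGrpcAsyncIOTransport(channel=channel)
assert transport.grpc_channel is channel  # the provided channel is used as-is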
+ return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_workflow_template(self) -> Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate]]: + r"""Return a callable for the create workflow template method over gRPC. + + Creates new workflow template. + + Returns: + Callable[[~.CreateWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_workflow_template' not in self._stubs: + self._stubs['create_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/CreateWorkflowTemplate', + request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs['create_workflow_template'] + + @property + def get_workflow_template(self) -> Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate]]: + r"""Return a callable for the get workflow template method over gRPC. + + Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Returns: + Callable[[~.GetWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_workflow_template' not in self._stubs: + self._stubs['get_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/GetWorkflowTemplate', + request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs['get_workflow_template'] + + @property + def instantiate_workflow_template(self) -> Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the instantiate workflow template method over gRPC. + + Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. 
+ Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateWorkflowTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'instantiate_workflow_template' not in self._stubs: + self._stubs['instantiate_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateWorkflowTemplate', + request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['instantiate_workflow_template'] + + @property + def instantiate_inline_workflow_template(self) -> Callable[ + [workflow_templates.InstantiateInlineWorkflowTemplateRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the instantiate inline workflow + template method over gRPC. + + Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateInlineWorkflowTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'instantiate_inline_workflow_template' not in self._stubs: + self._stubs['instantiate_inline_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateInlineWorkflowTemplate', + request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['instantiate_inline_workflow_template'] + + @property + def update_workflow_template(self) -> Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate]]: + r"""Return a callable for the update workflow template method over gRPC. + + Updates (replaces) workflow template. 
The updated + template must contain a version that matches the current + server version. + + Returns: + Callable[[~.UpdateWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_workflow_template' not in self._stubs: + self._stubs['update_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/UpdateWorkflowTemplate', + request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs['update_workflow_template'] + + @property + def list_workflow_templates(self) -> Callable[ + [workflow_templates.ListWorkflowTemplatesRequest], + Awaitable[workflow_templates.ListWorkflowTemplatesResponse]]: + r"""Return a callable for the list workflow templates method over gRPC. + + Lists workflows that match the specified filter in + the request. + + Returns: + Callable[[~.ListWorkflowTemplatesRequest], + Awaitable[~.ListWorkflowTemplatesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_workflow_templates' not in self._stubs: + self._stubs['list_workflow_templates'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/ListWorkflowTemplates', + request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize, + response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize, + ) + return self._stubs['list_workflow_templates'] + + @property + def delete_workflow_template(self) -> Callable[ + [workflow_templates.DeleteWorkflowTemplateRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete workflow template method over gRPC. + + Deletes a workflow template. It does not cancel + in-progress workflows. + + Returns: + Callable[[~.DeleteWorkflowTemplateRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_workflow_template' not in self._stubs: + self._stubs['delete_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1.WorkflowTemplateService/DeleteWorkflowTemplate', + request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_workflow_template'] + + +__all__ = ( + 'WorkflowTemplateServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/__init__.py new file mode 100644 index 00000000..92d3cd45 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/__init__.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .autoscaling_policies import ( + AutoscalingPolicy, + BasicAutoscalingAlgorithm, + BasicYarnAutoscalingConfig, + CreateAutoscalingPolicyRequest, + DeleteAutoscalingPolicyRequest, + GetAutoscalingPolicyRequest, + InstanceGroupAutoscalingPolicyConfig, + ListAutoscalingPoliciesRequest, + ListAutoscalingPoliciesResponse, + UpdateAutoscalingPolicyRequest, +) +from .clusters import ( + AcceleratorConfig, + AutoscalingConfig, + Cluster, + ClusterConfig, + ClusterMetrics, + ClusterStatus, + CreateClusterRequest, + DeleteClusterRequest, + DiagnoseClusterRequest, + DiagnoseClusterResults, + DiskConfig, + EncryptionConfig, + EndpointConfig, + GceClusterConfig, + GetClusterRequest, + GkeClusterConfig, + IdentityConfig, + InstanceGroupConfig, + KerberosConfig, + LifecycleConfig, + ListClustersRequest, + ListClustersResponse, + ManagedGroupConfig, + MetastoreConfig, + NodeGroupAffinity, + NodeInitializationAction, + ReservationAffinity, + SecurityConfig, + ShieldedInstanceConfig, + SoftwareConfig, + StartClusterRequest, + StopClusterRequest, + UpdateClusterRequest, +) +from .jobs import ( + CancelJobRequest, + DeleteJobRequest, + GetJobRequest, + HadoopJob, + HiveJob, + Job, + JobMetadata, + JobPlacement, + JobReference, + JobScheduling, + JobStatus, + ListJobsRequest, + ListJobsResponse, + LoggingConfig, + PigJob, + PrestoJob, + PySparkJob, + QueryList, + SparkJob, + SparkRJob, + SparkSqlJob, + SubmitJobRequest, + UpdateJobRequest, + YarnApplication, +) +from .operations import ( + ClusterOperationMetadata, + ClusterOperationStatus, +) +from .workflow_templates import ( + ClusterOperation, + ClusterSelector, + CreateWorkflowTemplateRequest, + DeleteWorkflowTemplateRequest, + GetWorkflowTemplateRequest, + InstantiateInlineWorkflowTemplateRequest, + InstantiateWorkflowTemplateRequest, + ListWorkflowTemplatesRequest, + ListWorkflowTemplatesResponse, + ManagedCluster, + OrderedJob, + ParameterValidation, + RegexValidation, + TemplateParameter, + UpdateWorkflowTemplateRequest, + ValueValidation, + WorkflowGraph, + WorkflowMetadata, + WorkflowNode, + WorkflowTemplate, + WorkflowTemplatePlacement, +) + +__all__ = ( + 'AutoscalingPolicy', + 'BasicAutoscalingAlgorithm', + 
'BasicYarnAutoscalingConfig', + 'CreateAutoscalingPolicyRequest', + 'DeleteAutoscalingPolicyRequest', + 'GetAutoscalingPolicyRequest', + 'InstanceGroupAutoscalingPolicyConfig', + 'ListAutoscalingPoliciesRequest', + 'ListAutoscalingPoliciesResponse', + 'UpdateAutoscalingPolicyRequest', + 'AcceleratorConfig', + 'AutoscalingConfig', + 'Cluster', + 'ClusterConfig', + 'ClusterMetrics', + 'ClusterStatus', + 'CreateClusterRequest', + 'DeleteClusterRequest', + 'DiagnoseClusterRequest', + 'DiagnoseClusterResults', + 'DiskConfig', + 'EncryptionConfig', + 'EndpointConfig', + 'GceClusterConfig', + 'GetClusterRequest', + 'GkeClusterConfig', + 'IdentityConfig', + 'InstanceGroupConfig', + 'KerberosConfig', + 'LifecycleConfig', + 'ListClustersRequest', + 'ListClustersResponse', + 'ManagedGroupConfig', + 'MetastoreConfig', + 'NodeGroupAffinity', + 'NodeInitializationAction', + 'ReservationAffinity', + 'SecurityConfig', + 'ShieldedInstanceConfig', + 'SoftwareConfig', + 'StartClusterRequest', + 'StopClusterRequest', + 'UpdateClusterRequest', + 'CancelJobRequest', + 'DeleteJobRequest', + 'GetJobRequest', + 'HadoopJob', + 'HiveJob', + 'Job', + 'JobMetadata', + 'JobPlacement', + 'JobReference', + 'JobScheduling', + 'JobStatus', + 'ListJobsRequest', + 'ListJobsResponse', + 'LoggingConfig', + 'PigJob', + 'PrestoJob', + 'PySparkJob', + 'QueryList', + 'SparkJob', + 'SparkRJob', + 'SparkSqlJob', + 'SubmitJobRequest', + 'UpdateJobRequest', + 'YarnApplication', + 'ClusterOperationMetadata', + 'ClusterOperationStatus', + 'Component', + 'ClusterOperation', + 'ClusterSelector', + 'CreateWorkflowTemplateRequest', + 'DeleteWorkflowTemplateRequest', + 'GetWorkflowTemplateRequest', + 'InstantiateInlineWorkflowTemplateRequest', + 'InstantiateWorkflowTemplateRequest', + 'ListWorkflowTemplatesRequest', + 'ListWorkflowTemplatesResponse', + 'ManagedCluster', + 'OrderedJob', + 'ParameterValidation', + 'RegexValidation', + 'TemplateParameter', + 'UpdateWorkflowTemplateRequest', + 'ValueValidation', + 'WorkflowGraph', + 'WorkflowMetadata', + 'WorkflowNode', + 'WorkflowTemplate', + 'WorkflowTemplatePlacement', +) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/autoscaling_policies.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/autoscaling_policies.py new file mode 100644 index 00000000..2d9f8651 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/autoscaling_policies.py @@ -0,0 +1,416 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
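For orientation only (not part of the generated patch): the transport above wires up the InstantiateWorkflowTemplate RPC as a long-running operation, and the package re-exports the request types listed in the ``__all__`` tuple just shown. The sketch below illustrates how a caller might drive that flow through the async client layer; the ``WorkflowTemplateServiceAsyncClient`` name and its operation-returning behaviour are assumed from the generated surface of this library, and the template resource name is a placeholder.

# Illustrative sketch, assuming the generated async client surface.
import asyncio

from google.cloud import dataproc_v1


async def instantiate(template_name: str) -> None:
    client = dataproc_v1.WorkflowTemplateServiceAsyncClient()
    request = dataproc_v1.InstantiateWorkflowTemplateRequest(name=template_name)
    # The RPC returns a long-running operation; Operation.response is Empty,
    # so we only wait for the workflow to finish (or raise on failure).
    operation = await client.instantiate_workflow_template(request=request)
    await operation.result()


asyncio.run(
    instantiate(
        "projects/my-project/regions/us-central1/workflowTemplates/my-template"
    )
)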
+# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1', + manifest={ + 'AutoscalingPolicy', + 'BasicAutoscalingAlgorithm', + 'BasicYarnAutoscalingConfig', + 'InstanceGroupAutoscalingPolicyConfig', + 'CreateAutoscalingPolicyRequest', + 'GetAutoscalingPolicyRequest', + 'UpdateAutoscalingPolicyRequest', + 'DeleteAutoscalingPolicyRequest', + 'ListAutoscalingPoliciesRequest', + 'ListAutoscalingPoliciesResponse', + }, +) + + +class AutoscalingPolicy(proto.Message): + r"""Describes an autoscaling policy for Dataproc cluster + autoscaler. + + Attributes: + id (str): + Required. The policy id. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of between 3 and 50 + characters. + name (str): + Output only. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies``, the + resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + basic_algorithm (google.cloud.dataproc_v1.types.BasicAutoscalingAlgorithm): + + worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig): + Required. Describes how the autoscaler will + operate for primary workers. + secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig): + Optional. Describes how the autoscaler will + operate for secondary workers. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + name = proto.Field( + proto.STRING, + number=2, + ) + basic_algorithm = proto.Field( + proto.MESSAGE, + number=3, + oneof='algorithm', + message='BasicAutoscalingAlgorithm', + ) + worker_config = proto.Field( + proto.MESSAGE, + number=4, + message='InstanceGroupAutoscalingPolicyConfig', + ) + secondary_worker_config = proto.Field( + proto.MESSAGE, + number=5, + message='InstanceGroupAutoscalingPolicyConfig', + ) + + +class BasicAutoscalingAlgorithm(proto.Message): + r"""Basic algorithm for autoscaling. + Attributes: + yarn_config (google.cloud.dataproc_v1.types.BasicYarnAutoscalingConfig): + Required. YARN autoscaling configuration. + cooldown_period (google.protobuf.duration_pb2.Duration): + Optional. Duration between scaling events. A scaling period + starts after the update operation from the previous event + has completed. + + Bounds: [2m, 1d]. Default: 2m. + """ + + yarn_config = proto.Field( + proto.MESSAGE, + number=1, + message='BasicYarnAutoscalingConfig', + ) + cooldown_period = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + + +class BasicYarnAutoscalingConfig(proto.Message): + r"""Basic autoscaling configurations for YARN. + Attributes: + graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): + Required. Timeout for YARN graceful decommissioning of Node + Managers. Specifies the duration to wait for jobs to + complete before forcefully removing workers (and potentially + interrupting jobs). Only applicable to downscaling + operations. + + Bounds: [0s, 1d]. + scale_up_factor (float): + Required. 
Fraction of average YARN pending memory in the + last cooldown period for which to add workers. A scale-up + factor of 1.0 will result in scaling up so that there is no + pending memory remaining after the update (more aggressive + scaling). A scale-up factor closer to 0 will result in a + smaller magnitude of scaling up (less aggressive scaling). + See `How autoscaling + works `__ + for more information. + + Bounds: [0.0, 1.0]. + scale_down_factor (float): + Required. Fraction of average YARN pending memory in the + last cooldown period for which to remove workers. A + scale-down factor of 1 will result in scaling down so that + there is no available memory remaining after the update + (more aggressive scaling). A scale-down factor of 0 disables + removing workers, which can be beneficial for autoscaling a + single job. See `How autoscaling + works `__ + for more information. + + Bounds: [0.0, 1.0]. + scale_up_min_worker_fraction (float): + Optional. Minimum scale-up threshold as a fraction of total + cluster size before scaling occurs. For example, in a + 20-worker cluster, a threshold of 0.1 means the autoscaler + must recommend at least a 2-worker scale-up for the cluster + to scale. A threshold of 0 means the autoscaler will scale + up on any recommended change. + + Bounds: [0.0, 1.0]. Default: 0.0. + scale_down_min_worker_fraction (float): + Optional. Minimum scale-down threshold as a fraction of + total cluster size before scaling occurs. For example, in a + 20-worker cluster, a threshold of 0.1 means the autoscaler + must recommend at least a 2 worker scale-down for the + cluster to scale. A threshold of 0 means the autoscaler will + scale down on any recommended change. + + Bounds: [0.0, 1.0]. Default: 0.0. + """ + + graceful_decommission_timeout = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + scale_up_factor = proto.Field( + proto.DOUBLE, + number=1, + ) + scale_down_factor = proto.Field( + proto.DOUBLE, + number=2, + ) + scale_up_min_worker_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + scale_down_min_worker_fraction = proto.Field( + proto.DOUBLE, + number=4, + ) + + +class InstanceGroupAutoscalingPolicyConfig(proto.Message): + r"""Configuration for the size bounds of an instance group, + including its proportional size to other groups. + + Attributes: + min_instances (int): + Optional. Minimum number of instances for this group. + + Primary workers - Bounds: [2, max_instances]. Default: 2. + Secondary workers - Bounds: [0, max_instances]. Default: 0. + max_instances (int): + Required. Maximum number of instances for this group. + Required for primary workers. Note that by default, clusters + will not use secondary workers. Required for secondary + workers if the minimum secondary instances is set. + + Primary workers - Bounds: [min_instances, ). Secondary + workers - Bounds: [min_instances, ). Default: 0. + weight (int): + Optional. Weight for the instance group, which is used to + determine the fraction of total workers in the cluster from + this instance group. For example, if primary workers have + weight 2, and secondary workers have weight 1, the cluster + will have approximately 2 primary workers for each secondary + worker. + + The cluster may not reach the specified balance if + constrained by min/max bounds or other autoscaling settings. + For example, if ``max_instances`` for secondary workers is + 0, then only primary workers will be added. The cluster can + also be out of balance when created. 
+ + If weight is not set on any instance group, the cluster will + default to equal weight for all groups: the cluster will + attempt to maintain an equal number of workers in each group + within the configured size bounds for each group. If weight + is set for one group only, the cluster will default to zero + weight on the unset group. For example if weight is set only + on primary workers, the cluster will use primary workers + only and no secondary workers. + """ + + min_instances = proto.Field( + proto.INT32, + number=1, + ) + max_instances = proto.Field( + proto.INT32, + number=2, + ) + weight = proto.Field( + proto.INT32, + number=3, + ) + + +class CreateAutoscalingPolicyRequest(proto.Message): + r"""A request to create an autoscaling policy. + Attributes: + parent (str): + Required. The "resource name" of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.create``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.create``, + the resource name of the location has the following + format: ``projects/{project_id}/locations/{location}`` + policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): + Required. The autoscaling policy to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + policy = proto.Field( + proto.MESSAGE, + number=2, + message='AutoscalingPolicy', + ) + + +class GetAutoscalingPolicyRequest(proto.Message): + r"""A request to fetch an autoscaling policy. + Attributes: + name (str): + Required. The "resource name" of the autoscaling policy, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdateAutoscalingPolicyRequest(proto.Message): + r"""A request to update an autoscaling policy. + Attributes: + policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): + Required. The updated autoscaling policy. + """ + + policy = proto.Field( + proto.MESSAGE, + number=1, + message='AutoscalingPolicy', + ) + + +class DeleteAutoscalingPolicyRequest(proto.Message): + r"""A request to delete an autoscaling policy. + Autoscaling policies in use by one or more clusters will not be + deleted. + + Attributes: + name (str): + Required. The "resource name" of the autoscaling policy, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.delete``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListAutoscalingPoliciesRequest(proto.Message): + r"""A request to list autoscaling policies in a project. + Attributes: + parent (str): + Required. 
The "resource name" of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + page_size (int): + Optional. The maximum number of results to + return in each response. Must be less than or + equal to 1000. Defaults to 100. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + + +class ListAutoscalingPoliciesResponse(proto.Message): + r"""A response to a request to list autoscaling policies in a + project. + + Attributes: + policies (Sequence[google.cloud.dataproc_v1.types.AutoscalingPolicy]): + Output only. Autoscaling policies list. + next_page_token (str): + Output only. This token is included in the + response if there are more results to fetch. + """ + + @property + def raw_page(self): + return self + + policies = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='AutoscalingPolicy', + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/clusters.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/clusters.py new file mode 100644 index 00000000..f2395056 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/clusters.py @@ -0,0 +1,1797 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
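As a quick illustration of the ``autoscaling_policies`` module defined above (again, not part of the patch itself): the proto-plus messages compose directly with the protobuf well-known types the module imports. The project, region, policy id, and numeric bounds below are placeholder values chosen within the documented bounds, and the client call mentioned in the final comment is assumed from the generated ``AutoscalingPolicyService`` client.

# Minimal sketch of composing the messages defined above.
from google.cloud import dataproc_v1
from google.protobuf import duration_pb2

policy = dataproc_v1.AutoscalingPolicy(
    id="yarn-memory-policy",
    basic_algorithm=dataproc_v1.BasicAutoscalingAlgorithm(
        yarn_config=dataproc_v1.BasicYarnAutoscalingConfig(
            graceful_decommission_timeout=duration_pb2.Duration(seconds=600),
            scale_up_factor=0.5,
            scale_down_factor=0.5,
        ),
        cooldown_period=duration_pb2.Duration(seconds=120),
    ),
    worker_config=dataproc_v1.InstanceGroupAutoscalingPolicyConfig(
        min_instances=2,
        max_instances=20,
    ),
)

request = dataproc_v1.CreateAutoscalingPolicyRequest(
    parent="projects/my-project/regions/us-central1",
    policy=policy,
)
# A call such as AutoscalingPolicyServiceClient().create_autoscaling_policy(request=request)
# would then submit the policy (assumed generated client, shown earlier in this patch).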
+# +import proto # type: ignore + +from google.cloud.dataproc_v1.types import shared +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1', + manifest={ + 'Cluster', + 'ClusterConfig', + 'GkeClusterConfig', + 'EndpointConfig', + 'AutoscalingConfig', + 'EncryptionConfig', + 'GceClusterConfig', + 'NodeGroupAffinity', + 'ShieldedInstanceConfig', + 'InstanceGroupConfig', + 'ManagedGroupConfig', + 'AcceleratorConfig', + 'DiskConfig', + 'NodeInitializationAction', + 'ClusterStatus', + 'SecurityConfig', + 'KerberosConfig', + 'IdentityConfig', + 'SoftwareConfig', + 'LifecycleConfig', + 'MetastoreConfig', + 'ClusterMetrics', + 'CreateClusterRequest', + 'UpdateClusterRequest', + 'StopClusterRequest', + 'StartClusterRequest', + 'DeleteClusterRequest', + 'GetClusterRequest', + 'ListClustersRequest', + 'ListClustersResponse', + 'DiagnoseClusterRequest', + 'DiagnoseClusterResults', + 'ReservationAffinity', + }, +) + + +class Cluster(proto.Message): + r"""Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + Attributes: + project_id (str): + Required. The Google Cloud Platform project + ID that the cluster belongs to. + cluster_name (str): + Required. The cluster name. Cluster names + within a project must be unique. Names of + deleted clusters can be reused. + config (google.cloud.dataproc_v1.types.ClusterConfig): + Required. The cluster config. Note that + Dataproc may set default values, and values may + change when clusters are updated. + labels (Sequence[google.cloud.dataproc_v1.types.Cluster.LabelsEntry]): + Optional. The labels to associate with this cluster. Label + **keys** must contain 1 to 63 characters, and must conform + to `RFC 1035 `__. + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. No more than + 32 labels can be associated with a cluster. + status (google.cloud.dataproc_v1.types.ClusterStatus): + Output only. Cluster status. + status_history (Sequence[google.cloud.dataproc_v1.types.ClusterStatus]): + Output only. The previous cluster status. + cluster_uuid (str): + Output only. A cluster UUID (Unique Universal + Identifier). Dataproc generates this value when + it creates the cluster. + metrics (google.cloud.dataproc_v1.types.ClusterMetrics): + Output only. Contains cluster daemon metrics such as HDFS + and YARN stats. + + **Beta Feature**: This report is available for testing + purposes only. It may be changed before final release. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + config = proto.Field( + proto.MESSAGE, + number=3, + message='ClusterConfig', + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + status = proto.Field( + proto.MESSAGE, + number=4, + message='ClusterStatus', + ) + status_history = proto.RepeatedField( + proto.MESSAGE, + number=7, + message='ClusterStatus', + ) + cluster_uuid = proto.Field( + proto.STRING, + number=6, + ) + metrics = proto.Field( + proto.MESSAGE, + number=9, + message='ClusterMetrics', + ) + + +class ClusterConfig(proto.Message): + r"""The cluster config. + Attributes: + config_bucket (str): + Optional. A Cloud Storage bucket used to stage job + dependencies, config files, and job driver console output. 
+ If you do not specify a staging bucket, Cloud Dataproc will + determine a Cloud Storage location (US, ASIA, or EU) for + your cluster's staging bucket according to the Compute + Engine zone where your cluster is deployed, and then create + and manage this project-level, per-location bucket (see + `Dataproc staging + bucket `__). + **This field requires a Cloud Storage bucket name, not a URI + to a Cloud Storage bucket.** + temp_bucket (str): + Optional. A Cloud Storage bucket used to store ephemeral + cluster and jobs data, such as Spark and MapReduce history + files. If you do not specify a temp bucket, Dataproc will + determine a Cloud Storage location (US, ASIA, or EU) for + your cluster's temp bucket according to the Compute Engine + zone where your cluster is deployed, and then create and + manage this project-level, per-location bucket. The default + bucket has a TTL of 90 days, but you can use any TTL (or + none) if you specify a bucket. **This field requires a Cloud + Storage bucket name, not a URI to a Cloud Storage bucket.** + gce_cluster_config (google.cloud.dataproc_v1.types.GceClusterConfig): + Optional. The shared Compute Engine config + settings for all instances in a cluster. + master_config (google.cloud.dataproc_v1.types.InstanceGroupConfig): + Optional. The Compute Engine config settings + for the master instance in a cluster. + worker_config (google.cloud.dataproc_v1.types.InstanceGroupConfig): + Optional. The Compute Engine config settings + for worker instances in a cluster. + secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupConfig): + Optional. The Compute Engine config settings + for additional worker instances in a cluster. + software_config (google.cloud.dataproc_v1.types.SoftwareConfig): + Optional. The config settings for software + inside the cluster. + initialization_actions (Sequence[google.cloud.dataproc_v1.types.NodeInitializationAction]): + Optional. Commands to execute on each node after config is + completed. By default, executables are run on master and all + worker nodes. You can test a node's ``role`` metadata to run + an executable on a master or worker node, as shown below + using ``curl`` (you can also use ``wget``): + + :: + + ROLE=$(curl -H Metadata-Flavor:Google + http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + if [[ "${ROLE}" == 'Master' ]]; then + ... master specific actions ... + else + ... worker specific actions ... + fi + encryption_config (google.cloud.dataproc_v1.types.EncryptionConfig): + Optional. Encryption settings for the + cluster. + autoscaling_config (google.cloud.dataproc_v1.types.AutoscalingConfig): + Optional. Autoscaling config for the policy + associated with the cluster. Cluster does not + autoscale if this field is unset. + security_config (google.cloud.dataproc_v1.types.SecurityConfig): + Optional. Security settings for the cluster. + lifecycle_config (google.cloud.dataproc_v1.types.LifecycleConfig): + Optional. Lifecycle setting for the cluster. + endpoint_config (google.cloud.dataproc_v1.types.EndpointConfig): + Optional. Port/endpoint configuration for + this cluster + metastore_config (google.cloud.dataproc_v1.types.MetastoreConfig): + Optional. Metastore configuration. + gke_cluster_config (google.cloud.dataproc_v1.types.GkeClusterConfig): + Optional. BETA. The Kubernetes Engine config for Dataproc + clusters deployed to Kubernetes. 
Setting this is considered + mutually exclusive with Compute Engine-based options such as + ``gce_cluster_config``, ``master_config``, + ``worker_config``, ``secondary_worker_config``, and + ``autoscaling_config``. + """ + + config_bucket = proto.Field( + proto.STRING, + number=1, + ) + temp_bucket = proto.Field( + proto.STRING, + number=2, + ) + gce_cluster_config = proto.Field( + proto.MESSAGE, + number=8, + message='GceClusterConfig', + ) + master_config = proto.Field( + proto.MESSAGE, + number=9, + message='InstanceGroupConfig', + ) + worker_config = proto.Field( + proto.MESSAGE, + number=10, + message='InstanceGroupConfig', + ) + secondary_worker_config = proto.Field( + proto.MESSAGE, + number=12, + message='InstanceGroupConfig', + ) + software_config = proto.Field( + proto.MESSAGE, + number=13, + message='SoftwareConfig', + ) + initialization_actions = proto.RepeatedField( + proto.MESSAGE, + number=11, + message='NodeInitializationAction', + ) + encryption_config = proto.Field( + proto.MESSAGE, + number=15, + message='EncryptionConfig', + ) + autoscaling_config = proto.Field( + proto.MESSAGE, + number=18, + message='AutoscalingConfig', + ) + security_config = proto.Field( + proto.MESSAGE, + number=16, + message='SecurityConfig', + ) + lifecycle_config = proto.Field( + proto.MESSAGE, + number=17, + message='LifecycleConfig', + ) + endpoint_config = proto.Field( + proto.MESSAGE, + number=19, + message='EndpointConfig', + ) + metastore_config = proto.Field( + proto.MESSAGE, + number=20, + message='MetastoreConfig', + ) + gke_cluster_config = proto.Field( + proto.MESSAGE, + number=21, + message='GkeClusterConfig', + ) + + +class GkeClusterConfig(proto.Message): + r"""The GKE config for this cluster. + Attributes: + namespaced_gke_deployment_target (google.cloud.dataproc_v1.types.GkeClusterConfig.NamespacedGkeDeploymentTarget): + Optional. A target for the deployment. + """ + + class NamespacedGkeDeploymentTarget(proto.Message): + r"""A full, namespace-isolated deployment target for an existing + GKE cluster. + + Attributes: + target_gke_cluster (str): + Optional. The target GKE cluster to deploy to. Format: + 'projects/{project}/locations/{location}/clusters/{cluster_id}' + cluster_namespace (str): + Optional. A namespace within the GKE cluster + to deploy into. + """ + + target_gke_cluster = proto.Field( + proto.STRING, + number=1, + ) + cluster_namespace = proto.Field( + proto.STRING, + number=2, + ) + + namespaced_gke_deployment_target = proto.Field( + proto.MESSAGE, + number=1, + message=NamespacedGkeDeploymentTarget, + ) + + +class EndpointConfig(proto.Message): + r"""Endpoint config for this cluster + Attributes: + http_ports (Sequence[google.cloud.dataproc_v1.types.EndpointConfig.HttpPortsEntry]): + Output only. The map of port descriptions to URLs. Will only + be populated if enable_http_port_access is true. + enable_http_port_access (bool): + Optional. If true, enable http access to + specific ports on the cluster from external + sources. Defaults to false. + """ + + http_ports = proto.MapField( + proto.STRING, + proto.STRING, + number=1, + ) + enable_http_port_access = proto.Field( + proto.BOOL, + number=2, + ) + + +class AutoscalingConfig(proto.Message): + r"""Autoscaling Policy config associated with the cluster. + Attributes: + policy_uri (str): + Optional. The autoscaling policy used by the cluster. + + Only resource names including projectid and location + (region) are valid. 
Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` + - ``projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` + + Note that the policy must be in the same project and + Dataproc region. + """ + + policy_uri = proto.Field( + proto.STRING, + number=1, + ) + + +class EncryptionConfig(proto.Message): + r"""Encryption settings for the cluster. + Attributes: + gce_pd_kms_key_name (str): + Optional. The Cloud KMS key name to use for + PD disk encryption for all instances in the + cluster. + """ + + gce_pd_kms_key_name = proto.Field( + proto.STRING, + number=1, + ) + + +class GceClusterConfig(proto.Message): + r"""Common config settings for resources of Compute Engine + cluster instances, applicable to all instances in the cluster. + + Attributes: + zone_uri (str): + Optional. The zone where the Compute Engine cluster will be + located. On a create request, it is required in the "global" + region. If omitted in a non-global Dataproc region, the + service will pick a zone in the corresponding Compute Engine + region. On a get request, zone will always be present. + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`` + - ``projects/[project_id]/zones/[zone]`` + - ``us-central1-f`` + network_uri (str): + Optional. The Compute Engine network to be used for machine + communications. Cannot be specified with subnetwork_uri. If + neither ``network_uri`` nor ``subnetwork_uri`` is specified, + the "default" network of the project is used, if it exists. + Cannot be a "Custom Subnet Network" (see `Using + Subnetworks `__ + for more information). + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`` + - ``projects/[project_id]/regions/global/default`` + - ``default`` + subnetwork_uri (str): + Optional. The Compute Engine subnetwork to be used for + machine communications. Cannot be specified with + network_uri. + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0`` + - ``projects/[project_id]/regions/us-east1/subnetworks/sub0`` + - ``sub0`` + internal_ip_only (bool): + Optional. If true, all instances in the cluster will only + have internal IP addresses. By default, clusters are not + restricted to internal IP addresses, and will have ephemeral + external IP addresses assigned to each instance. This + ``internal_ip_only`` restriction can only be enabled for + subnetwork enabled networks, and all off-cluster + dependencies must be configured to be accessible without + external IP addresses. + private_ipv6_google_access (google.cloud.dataproc_v1.types.GceClusterConfig.PrivateIpv6GoogleAccess): + Optional. The type of IPv6 access for a + cluster. + service_account (str): + Optional. The `Dataproc service + account `__ + (also see `VM Data Plane + identity `__) + used by Dataproc cluster VM instances to access Google Cloud + Platform services. + + If not specified, the `Compute Engine default service + account `__ + is used. + service_account_scopes (Sequence[str]): + Optional. The URIs of service account scopes to be included + in Compute Engine instances. 
The following base set of + scopes is always included: + + - https://www.googleapis.com/auth/cloud.useraccounts.readonly + - https://www.googleapis.com/auth/devstorage.read_write + - https://www.googleapis.com/auth/logging.write + + If no scopes are specified, the following defaults are also + provided: + + - https://www.googleapis.com/auth/bigquery + - https://www.googleapis.com/auth/bigtable.admin.table + - https://www.googleapis.com/auth/bigtable.data + - https://www.googleapis.com/auth/devstorage.full_control + tags (Sequence[str]): + The Compute Engine tags to add to all instances (see + `Tagging + instances `__). + metadata (Sequence[google.cloud.dataproc_v1.types.GceClusterConfig.MetadataEntry]): + The Compute Engine metadata entries to add to all instances + (see `Project and instance + metadata `__). + reservation_affinity (google.cloud.dataproc_v1.types.ReservationAffinity): + Optional. Reservation Affinity for consuming + Zonal reservation. + node_group_affinity (google.cloud.dataproc_v1.types.NodeGroupAffinity): + Optional. Node Group Affinity for sole-tenant + clusters. + shielded_instance_config (google.cloud.dataproc_v1.types.ShieldedInstanceConfig): + Optional. Shielded Instance Config for clusters using + `Compute Engine Shielded + VMs `__. + """ + class PrivateIpv6GoogleAccess(proto.Enum): + r"""``PrivateIpv6GoogleAccess`` controls whether and how Dataproc + cluster nodes can communicate with Google Services through gRPC over + IPv6. These values are directly mapped to corresponding values in + the `Compute Engine Instance + fields `__. + """ + PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED = 0 + INHERIT_FROM_SUBNETWORK = 1 + OUTBOUND = 2 + BIDIRECTIONAL = 3 + + zone_uri = proto.Field( + proto.STRING, + number=1, + ) + network_uri = proto.Field( + proto.STRING, + number=2, + ) + subnetwork_uri = proto.Field( + proto.STRING, + number=6, + ) + internal_ip_only = proto.Field( + proto.BOOL, + number=7, + ) + private_ipv6_google_access = proto.Field( + proto.ENUM, + number=12, + enum=PrivateIpv6GoogleAccess, + ) + service_account = proto.Field( + proto.STRING, + number=8, + ) + service_account_scopes = proto.RepeatedField( + proto.STRING, + number=3, + ) + tags = proto.RepeatedField( + proto.STRING, + number=4, + ) + metadata = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + reservation_affinity = proto.Field( + proto.MESSAGE, + number=11, + message='ReservationAffinity', + ) + node_group_affinity = proto.Field( + proto.MESSAGE, + number=13, + message='NodeGroupAffinity', + ) + shielded_instance_config = proto.Field( + proto.MESSAGE, + number=14, + message='ShieldedInstanceConfig', + ) + + +class NodeGroupAffinity(proto.Message): + r"""Node Group Affinity for clusters using sole-tenant node + groups. + + Attributes: + node_group_uri (str): + Required. The URI of a sole-tenant `node group + resource `__ + that the cluster will be created on. + + A full URL, partial URI, or node group name are valid. + Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`` + - ``projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`` + - ``node-group-1`` + """ + + node_group_uri = proto.Field( + proto.STRING, + number=1, + ) + + +class ShieldedInstanceConfig(proto.Message): + r"""Shielded Instance Config for clusters using `Compute Engine Shielded + VMs `__. + + Attributes: + enable_secure_boot (bool): + Optional. Defines whether instances have + Secure Boot enabled. + enable_vtpm (bool): + Optional. 
Defines whether instances have the + vTPM enabled. + enable_integrity_monitoring (bool): + Optional. Defines whether instances have + integrity monitoring enabled. + """ + + enable_secure_boot = proto.Field( + proto.BOOL, + number=1, + ) + enable_vtpm = proto.Field( + proto.BOOL, + number=2, + ) + enable_integrity_monitoring = proto.Field( + proto.BOOL, + number=3, + ) + + +class InstanceGroupConfig(proto.Message): + r"""The config settings for Compute Engine resources in + an instance group, such as a master or worker group. + + Attributes: + num_instances (int): + Optional. The number of VM instances in the instance group. + For `HA + cluster `__ + `master_config <#FIELDS.master_config>`__ groups, **must be + set to 3**. For standard cluster + `master_config <#FIELDS.master_config>`__ groups, **must be + set to 1**. + instance_names (Sequence[str]): + Output only. The list of instance names. Dataproc derives + the names from ``cluster_name``, ``num_instances``, and the + instance group. + image_uri (str): + Optional. The Compute Engine image resource used for cluster + instances. + + The URI can represent an image or image family. + + Image examples: + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`` + - ``projects/[project_id]/global/images/[image-id]`` + - ``image-id`` + + Image family examples. Dataproc will use the most recent + image from the family: + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`` + - ``projects/[project_id]/global/images/family/[custom-image-family-name]`` + + If the URI is unspecified, it will be inferred from + ``SoftwareConfig.image_version`` or the system default. + machine_type_uri (str): + Optional. The Compute Engine machine type used for cluster + instances. + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` + - ``projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` + - ``n1-standard-2`` + + **Auto Zone Exception**: If you are using the Dataproc `Auto + Zone + Placement `__ + feature, you must use the short name of the machine type + resource, for example, ``n1-standard-2``. + disk_config (google.cloud.dataproc_v1.types.DiskConfig): + Optional. Disk option config settings. + is_preemptible (bool): + Output only. Specifies that this instance + group contains preemptible instances. + preemptibility (google.cloud.dataproc_v1.types.InstanceGroupConfig.Preemptibility): + Optional. Specifies the preemptibility of the instance + group. + + The default value for master and worker groups is + ``NON_PREEMPTIBLE``. This default cannot be changed. + + The default value for secondary instances is + ``PREEMPTIBLE``. + managed_group_config (google.cloud.dataproc_v1.types.ManagedGroupConfig): + Output only. The config for Compute Engine + Instance Group Manager that manages this group. + This is only used for preemptible instance + groups. + accelerators (Sequence[google.cloud.dataproc_v1.types.AcceleratorConfig]): + Optional. The Compute Engine accelerator + configuration for these instances. + min_cpu_platform (str): + Optional. Specifies the minimum cpu platform for the + Instance Group. See `Dataproc -> Minimum CPU + Platform `__. + """ + class Preemptibility(proto.Enum): + r"""Controls the use of [preemptible instances] + (https://cloud.google.com/compute/docs/instances/preemptible) within + the group. 
+ """ + PREEMPTIBILITY_UNSPECIFIED = 0 + NON_PREEMPTIBLE = 1 + PREEMPTIBLE = 2 + + num_instances = proto.Field( + proto.INT32, + number=1, + ) + instance_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + image_uri = proto.Field( + proto.STRING, + number=3, + ) + machine_type_uri = proto.Field( + proto.STRING, + number=4, + ) + disk_config = proto.Field( + proto.MESSAGE, + number=5, + message='DiskConfig', + ) + is_preemptible = proto.Field( + proto.BOOL, + number=6, + ) + preemptibility = proto.Field( + proto.ENUM, + number=10, + enum=Preemptibility, + ) + managed_group_config = proto.Field( + proto.MESSAGE, + number=7, + message='ManagedGroupConfig', + ) + accelerators = proto.RepeatedField( + proto.MESSAGE, + number=8, + message='AcceleratorConfig', + ) + min_cpu_platform = proto.Field( + proto.STRING, + number=9, + ) + + +class ManagedGroupConfig(proto.Message): + r"""Specifies the resources used to actively manage an instance + group. + + Attributes: + instance_template_name (str): + Output only. The name of the Instance + Template used for the Managed Instance Group. + instance_group_manager_name (str): + Output only. The name of the Instance Group + Manager for this group. + """ + + instance_template_name = proto.Field( + proto.STRING, + number=1, + ) + instance_group_manager_name = proto.Field( + proto.STRING, + number=2, + ) + + +class AcceleratorConfig(proto.Message): + r"""Specifies the type and number of accelerator cards attached to the + instances of an instance. See `GPUs on Compute + Engine `__. + + Attributes: + accelerator_type_uri (str): + Full URL, partial URI, or short name of the accelerator type + resource to expose to this instance. See `Compute Engine + AcceleratorTypes `__. + + Examples: + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` + - ``projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` + - ``nvidia-tesla-k80`` + + **Auto Zone Exception**: If you are using the Dataproc `Auto + Zone + Placement `__ + feature, you must use the short name of the accelerator type + resource, for example, ``nvidia-tesla-k80``. + accelerator_count (int): + The number of the accelerator cards of this + type exposed to this instance. + """ + + accelerator_type_uri = proto.Field( + proto.STRING, + number=1, + ) + accelerator_count = proto.Field( + proto.INT32, + number=2, + ) + + +class DiskConfig(proto.Message): + r"""Specifies the config of disk options for a group of VM + instances. + + Attributes: + boot_disk_type (str): + Optional. Type of the boot disk (default is "pd-standard"). + Valid values: "pd-balanced" (Persistent Disk Balanced Solid + State Drive), "pd-ssd" (Persistent Disk Solid State Drive), + or "pd-standard" (Persistent Disk Hard Disk Drive). See + `Disk + types `__. + boot_disk_size_gb (int): + Optional. Size in GB of the boot disk + (default is 500GB). + num_local_ssds (int): + Optional. Number of attached SSDs, from 0 to 4 (default is + 0). If SSDs are not attached, the boot disk is used to store + runtime logs and + `HDFS `__ + data. If one or more SSDs are attached, this runtime bulk + data is spread across them, and the boot disk contains only + basic config and installed binaries. 
+ """ + + boot_disk_type = proto.Field( + proto.STRING, + number=3, + ) + boot_disk_size_gb = proto.Field( + proto.INT32, + number=1, + ) + num_local_ssds = proto.Field( + proto.INT32, + number=2, + ) + + +class NodeInitializationAction(proto.Message): + r"""Specifies an executable to run on a fully configured node and + a timeout period for executable completion. + + Attributes: + executable_file (str): + Required. Cloud Storage URI of executable + file. + execution_timeout (google.protobuf.duration_pb2.Duration): + Optional. Amount of time executable has to complete. Default + is 10 minutes (see JSON representation of + `Duration `__). + + Cluster creation fails with an explanatory error message + (the name of the executable that caused the error and the + exceeded timeout period) if the executable is not completed + at end of the timeout period. + """ + + executable_file = proto.Field( + proto.STRING, + number=1, + ) + execution_timeout = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + + +class ClusterStatus(proto.Message): + r"""The status of a cluster and its instances. + Attributes: + state (google.cloud.dataproc_v1.types.ClusterStatus.State): + Output only. The cluster's state. + detail (str): + Optional. Output only. Details of cluster's + state. + state_start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when this state was entered (see JSON + representation of + `Timestamp `__). + substate (google.cloud.dataproc_v1.types.ClusterStatus.Substate): + Output only. Additional state information + that includes status reported by the agent. + """ + class State(proto.Enum): + r"""The cluster state.""" + UNKNOWN = 0 + CREATING = 1 + RUNNING = 2 + ERROR = 3 + DELETING = 4 + UPDATING = 5 + STOPPING = 6 + STOPPED = 7 + STARTING = 8 + + class Substate(proto.Enum): + r"""The cluster substate.""" + UNSPECIFIED = 0 + UNHEALTHY = 1 + STALE_STATUS = 2 + + state = proto.Field( + proto.ENUM, + number=1, + enum=State, + ) + detail = proto.Field( + proto.STRING, + number=2, + ) + state_start_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + substate = proto.Field( + proto.ENUM, + number=4, + enum=Substate, + ) + + +class SecurityConfig(proto.Message): + r"""Security related configuration, including encryption, + Kerberos, etc. + + Attributes: + kerberos_config (google.cloud.dataproc_v1.types.KerberosConfig): + Optional. Kerberos related configuration. + identity_config (google.cloud.dataproc_v1.types.IdentityConfig): + Optional. Identity related configuration, + including service account based secure multi- + tenancy user mappings. + """ + + kerberos_config = proto.Field( + proto.MESSAGE, + number=1, + message='KerberosConfig', + ) + identity_config = proto.Field( + proto.MESSAGE, + number=2, + message='IdentityConfig', + ) + + +class KerberosConfig(proto.Message): + r"""Specifies Kerberos related configuration. + Attributes: + enable_kerberos (bool): + Optional. Flag to indicate whether to + Kerberize the cluster (default: false). Set this + field to true to enable Kerberos on a cluster. + root_principal_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the root principal + password. + kms_key_uri (str): + Optional. The uri of the KMS key used to + encrypt various sensitive files. + keystore_uri (str): + Optional. The Cloud Storage URI of the + keystore file used for SSL encryption. If not + provided, Dataproc will provide a self-signed + certificate. 
+ truststore_uri (str): + Optional. The Cloud Storage URI of the + truststore file used for SSL encryption. If not + provided, Dataproc will provide a self-signed + certificate. + keystore_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the password to the + user provided keystore. For the self-signed + certificate, this password is generated by + Dataproc. + key_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the password to the + user provided key. For the self-signed + certificate, this password is generated by + Dataproc. + truststore_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the password to the + user provided truststore. For the self-signed + certificate, this password is generated by + Dataproc. + cross_realm_trust_realm (str): + Optional. The remote realm the Dataproc on- + luster KDC will trust, should the user enable + cross realm trust. + cross_realm_trust_kdc (str): + Optional. The KDC (IP or hostname) for the + remote trusted realm in a cross realm trust + relationship. + cross_realm_trust_admin_server (str): + Optional. The admin server (IP or hostname) + for the remote trusted realm in a cross realm + trust relationship. + cross_realm_trust_shared_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the shared password + between the on-cluster Kerberos realm and the + remote trusted realm, in a cross realm trust + relationship. + kdc_db_key_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the master key of the + KDC database. + tgt_lifetime_hours (int): + Optional. The lifetime of the ticket granting + ticket, in hours. If not specified, or user + specifies 0, then default value 10 will be used. + realm (str): + Optional. The name of the on-cluster Kerberos + realm. If not specified, the uppercased domain + of hostnames will be the realm. + """ + + enable_kerberos = proto.Field( + proto.BOOL, + number=1, + ) + root_principal_password_uri = proto.Field( + proto.STRING, + number=2, + ) + kms_key_uri = proto.Field( + proto.STRING, + number=3, + ) + keystore_uri = proto.Field( + proto.STRING, + number=4, + ) + truststore_uri = proto.Field( + proto.STRING, + number=5, + ) + keystore_password_uri = proto.Field( + proto.STRING, + number=6, + ) + key_password_uri = proto.Field( + proto.STRING, + number=7, + ) + truststore_password_uri = proto.Field( + proto.STRING, + number=8, + ) + cross_realm_trust_realm = proto.Field( + proto.STRING, + number=9, + ) + cross_realm_trust_kdc = proto.Field( + proto.STRING, + number=10, + ) + cross_realm_trust_admin_server = proto.Field( + proto.STRING, + number=11, + ) + cross_realm_trust_shared_password_uri = proto.Field( + proto.STRING, + number=12, + ) + kdc_db_key_uri = proto.Field( + proto.STRING, + number=13, + ) + tgt_lifetime_hours = proto.Field( + proto.INT32, + number=14, + ) + realm = proto.Field( + proto.STRING, + number=15, + ) + + +class IdentityConfig(proto.Message): + r"""Identity related configuration, including service account + based secure multi-tenancy user mappings. + + Attributes: + user_service_account_mapping (Sequence[google.cloud.dataproc_v1.types.IdentityConfig.UserServiceAccountMappingEntry]): + Required. Map of user to service account. 
+ """ + + user_service_account_mapping = proto.MapField( + proto.STRING, + proto.STRING, + number=1, + ) + + +class SoftwareConfig(proto.Message): + r"""Specifies the selection and config of software inside the + cluster. + + Attributes: + image_version (str): + Optional. The version of software inside the cluster. It + must be one of the supported `Dataproc + Versions `__, + such as "1.2" (including a subminor version, such as + "1.2.29"), or the `"preview" + version `__. + If unspecified, it defaults to the latest Debian version. + properties (Sequence[google.cloud.dataproc_v1.types.SoftwareConfig.PropertiesEntry]): + Optional. The properties to set on daemon config files. + + Property keys are specified in ``prefix:property`` format, + for example ``core:hadoop.tmp.dir``. The following are + supported prefixes and their mappings: + + - capacity-scheduler: ``capacity-scheduler.xml`` + - core: ``core-site.xml`` + - distcp: ``distcp-default.xml`` + - hdfs: ``hdfs-site.xml`` + - hive: ``hive-site.xml`` + - mapred: ``mapred-site.xml`` + - pig: ``pig.properties`` + - spark: ``spark-defaults.conf`` + - yarn: ``yarn-site.xml`` + + For more information, see `Cluster + properties `__. + optional_components (Sequence[google.cloud.dataproc_v1.types.Component]): + Optional. The set of components to activate + on the cluster. + """ + + image_version = proto.Field( + proto.STRING, + number=1, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + optional_components = proto.RepeatedField( + proto.ENUM, + number=3, + enum=shared.Component, + ) + + +class LifecycleConfig(proto.Message): + r"""Specifies the cluster auto-delete schedule configuration. + Attributes: + idle_delete_ttl (google.protobuf.duration_pb2.Duration): + Optional. The duration to keep the cluster alive while + idling (when no jobs are running). Passing this threshold + will cause the cluster to be deleted. Minimum value is 5 + minutes; maximum value is 14 days (see JSON representation + of + `Duration `__). + auto_delete_time (google.protobuf.timestamp_pb2.Timestamp): + Optional. The time when cluster will be auto-deleted (see + JSON representation of + `Timestamp `__). + auto_delete_ttl (google.protobuf.duration_pb2.Duration): + Optional. The lifetime duration of cluster. The cluster will + be auto-deleted at the end of this period. Minimum value is + 10 minutes; maximum value is 14 days (see JSON + representation of + `Duration `__). + idle_start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when cluster became idle (most recent + job finished) and became eligible for deletion due to + idleness (see JSON representation of + `Timestamp `__). + """ + + idle_delete_ttl = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + auto_delete_time = proto.Field( + proto.MESSAGE, + number=2, + oneof='ttl', + message=timestamp_pb2.Timestamp, + ) + auto_delete_ttl = proto.Field( + proto.MESSAGE, + number=3, + oneof='ttl', + message=duration_pb2.Duration, + ) + idle_start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class MetastoreConfig(proto.Message): + r"""Specifies a Metastore configuration. + Attributes: + dataproc_metastore_service (str): + Required. Resource name of an existing Dataproc Metastore + service. 
+ + Example: + + - ``projects/[project_id]/locations/[dataproc_region]/services/[service-name]`` + """ + + dataproc_metastore_service = proto.Field( + proto.STRING, + number=1, + ) + + +class ClusterMetrics(proto.Message): + r"""Contains cluster daemon metrics, such as HDFS and YARN stats. + + **Beta Feature**: This report is available for testing purposes + only. It may be changed before final release. + + Attributes: + hdfs_metrics (Sequence[google.cloud.dataproc_v1.types.ClusterMetrics.HdfsMetricsEntry]): + The HDFS metrics. + yarn_metrics (Sequence[google.cloud.dataproc_v1.types.ClusterMetrics.YarnMetricsEntry]): + The YARN metrics. + """ + + hdfs_metrics = proto.MapField( + proto.STRING, + proto.INT64, + number=1, + ) + yarn_metrics = proto.MapField( + proto.STRING, + proto.INT64, + number=2, + ) + + +class CreateClusterRequest(proto.Message): + r"""A request to create a cluster. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster (google.cloud.dataproc_v1.types.Cluster): + Required. The cluster to create. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + `CreateClusterRequest `__\ s + with the same id, then the second request will be ignored + and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + cluster = proto.Field( + proto.MESSAGE, + number=2, + message='Cluster', + ) + request_id = proto.Field( + proto.STRING, + number=4, + ) + + +class UpdateClusterRequest(proto.Message): + r"""A request to update a cluster. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + cluster (google.cloud.dataproc_v1.types.Cluster): + Required. The changes to the cluster. + graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): + Optional. Timeout for graceful YARN decommissioning. Graceful + decommissioning allows removing nodes from the cluster + without interrupting jobs in progress. Timeout specifies how + long to wait for jobs in progress to finish before + forcefully removing nodes (and potentially interrupting + jobs). Default timeout is 0 (for forceful decommission), and + the maximum allowed timeout is 1 day. (see JSON + representation of + `Duration `__). + + Only supported on Dataproc image versions 1.2 and higher. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Specifies the path, relative to ``Cluster``, of + the field to update.
For example, to change the number of + workers in a cluster to 5, the ``update_mask`` parameter + would be specified as + ``config.worker_config.num_instances``, and the ``PATCH`` + request body would specify the new value, as follows: + + :: + + { + "config":{ + "workerConfig":{ + "numInstances":"5" + } + } + } + + Similarly, to change the number of preemptible workers in a + cluster to 5, the ``update_mask`` parameter would be + ``config.secondary_worker_config.num_instances``, and the + ``PATCH`` request body would be set as follows: + + :: + + { + "config":{ + "secondaryWorkerConfig":{ + "numInstances":"5" + } + } + } + + Note: Currently, only the following fields can be updated: + + .. raw:: html + + <table>
+ <tbody> + <tr> + <td>Mask</td> + <td>Purpose</td> + </tr>
+ <tr> + <td>labels</td> + <td>Update labels</td> + </tr>
+ <tr> + <td>config.worker_config.num_instances</td> + <td>Resize primary worker group</td> + </tr>
+ <tr> + <td>config.secondary_worker_config.num_instances</td> + <td>Resize secondary worker group</td> + </tr>
+ <tr> + <td>config.autoscaling_config.policy_uri</td> + <td>Use, stop using, or + change autoscaling policies</td> + </tr> + </tbody> + </table>
+ request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + `UpdateClusterRequest `__\ s + with the same id, then the second request will be ignored + and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=5, + ) + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + cluster = proto.Field( + proto.MESSAGE, + number=3, + message='Cluster', + ) + graceful_decommission_timeout = proto.Field( + proto.MESSAGE, + number=6, + message=duration_pb2.Duration, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=4, + message=field_mask_pb2.FieldMask, + ) + request_id = proto.Field( + proto.STRING, + number=7, + ) + + +class StopClusterRequest(proto.Message): + r"""A request to stop a cluster. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + cluster_uuid (str): + Optional. Specifying the ``cluster_uuid`` means the RPC will + fail (with error NOT_FOUND) if a cluster with the specified + UUID does not exist. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + `StopClusterRequest `__\ s + with the same id, then the second request will be ignored + and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + Recommendation: Set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=2, + ) + cluster_name = proto.Field( + proto.STRING, + number=3, + ) + cluster_uuid = proto.Field( + proto.STRING, + number=4, + ) + request_id = proto.Field( + proto.STRING, + number=5, + ) + + +class StartClusterRequest(proto.Message): + r"""A request to start a cluster. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + cluster_uuid (str): + Optional. Specifying the ``cluster_uuid`` means the RPC will + fail (with error NOT_FOUND) if a cluster with the specified + UUID does not exist. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + `StartClusterRequest `__\ s + with the same id, then the second request will be ignored + and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + Recommendation: Set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. 
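For the ``update_mask`` and ``request_id`` conventions described above, a minimal sketch of a resize request; the project, region, and cluster names are placeholders, and the client call is only indicated in a comment because its options lie outside this diff::

    import uuid

    from google.cloud import dataproc_v1
    from google.protobuf import field_mask_pb2

    request = dataproc_v1.UpdateClusterRequest(
        project_id="my-project",
        region="us-central1",
        cluster_name="my-cluster",
        # Only the field named in update_mask is read from this partial Cluster.
        cluster=dataproc_v1.Cluster(
            config=dataproc_v1.ClusterConfig(
                worker_config=dataproc_v1.InstanceGroupConfig(num_instances=5),
            ),
        ),
        update_mask=field_mask_pb2.FieldMask(
            paths=["config.worker_config.num_instances"],
        ),
        # A UUID makes retries of this request idempotent, per the docstring above.
        request_id=str(uuid.uuid4()),
    )
    # operation = dataproc_v1.ClusterControllerClient().update_cluster(request=request)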
+ """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=2, + ) + cluster_name = proto.Field( + proto.STRING, + number=3, + ) + cluster_uuid = proto.Field( + proto.STRING, + number=4, + ) + request_id = proto.Field( + proto.STRING, + number=5, + ) + + +class DeleteClusterRequest(proto.Message): + r"""A request to delete a cluster. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + cluster_uuid (str): + Optional. Specifying the ``cluster_uuid`` means the RPC + should fail (with error NOT_FOUND) if cluster with specified + UUID does not exist. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + `DeleteClusterRequest `__\ s + with the same id, then the second request will be ignored + and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + cluster_uuid = proto.Field( + proto.STRING, + number=4, + ) + request_id = proto.Field( + proto.STRING, + number=5, + ) + + +class GetClusterRequest(proto.Message): + r"""Request to get the resource representation for a cluster in a + project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + + +class ListClustersRequest(proto.Message): + r"""A request to list the clusters in a project. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + filter (str): + Optional. A filter constraining the clusters to list. + Filters are case-sensitive and have the following syntax: + + field = value [AND [field = value]] ... + + where **field** is one of ``status.state``, ``clusterName``, + or ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** + can be ``*`` to match all values. ``status.state`` can be + one of the following: ``ACTIVE``, ``INACTIVE``, + ``CREATING``, ``RUNNING``, ``ERROR``, ``DELETING``, or + ``UPDATING``. ``ACTIVE`` contains the ``CREATING``, + ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains + the ``DELETING`` and ``ERROR`` states. ``clusterName`` is + the name of the cluster provided at creation time. Only the + logical ``AND`` operator is supported; space-separated items + are treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + page_size (int): + Optional. The standard List page size. 
+ page_token (str): + Optional. The standard List page token. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=4, + ) + filter = proto.Field( + proto.STRING, + number=5, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + + +class ListClustersResponse(proto.Message): + r"""The list of all clusters in a project. + Attributes: + clusters (Sequence[google.cloud.dataproc_v1.types.Cluster]): + Output only. The clusters in the project. + next_page_token (str): + Output only. This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the ``page_token`` in a subsequent + ``ListClustersRequest``. + """ + + @property + def raw_page(self): + return self + + clusters = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Cluster', + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DiagnoseClusterRequest(proto.Message): + r"""A request to collect cluster diagnostic information. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + + +class DiagnoseClusterResults(proto.Message): + r"""The location of diagnostic output. + Attributes: + output_uri (str): + Output only. The Cloud Storage URI of the + diagnostic output. The output report is a plain + text file with a summary of collected + diagnostics. + """ + + output_uri = proto.Field( + proto.STRING, + number=1, + ) + + +class ReservationAffinity(proto.Message): + r"""Reservation Affinity for consuming Zonal reservation. + Attributes: + consume_reservation_type (google.cloud.dataproc_v1.types.ReservationAffinity.Type): + Optional. Type of reservation to consume + key (str): + Optional. Corresponds to the label key of + reservation resource. + values (Sequence[str]): + Optional. Corresponds to the label values of + reservation resource. + """ + class Type(proto.Enum): + r"""Indicates whether to consume capacity from an reservation or + not. + """ + TYPE_UNSPECIFIED = 0 + NO_RESERVATION = 1 + ANY_RESERVATION = 2 + SPECIFIC_RESERVATION = 3 + + consume_reservation_type = proto.Field( + proto.ENUM, + number=1, + enum=Type, + ) + key = proto.Field( + proto.STRING, + number=2, + ) + values = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/jobs.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/jobs.py new file mode 100644 index 00000000..e61a7e97 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/jobs.py @@ -0,0 +1,1368 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1', + manifest={ + 'LoggingConfig', + 'HadoopJob', + 'SparkJob', + 'PySparkJob', + 'QueryList', + 'HiveJob', + 'SparkSqlJob', + 'PigJob', + 'SparkRJob', + 'PrestoJob', + 'JobPlacement', + 'JobStatus', + 'JobReference', + 'YarnApplication', + 'Job', + 'JobScheduling', + 'SubmitJobRequest', + 'JobMetadata', + 'GetJobRequest', + 'ListJobsRequest', + 'UpdateJobRequest', + 'ListJobsResponse', + 'CancelJobRequest', + 'DeleteJobRequest', + }, +) + + +class LoggingConfig(proto.Message): + r"""The runtime logging config of the job. + Attributes: + driver_log_levels (Sequence[google.cloud.dataproc_v1.types.LoggingConfig.DriverLogLevelsEntry]): + The per-package log levels for the driver. + This may include "root" package name to + configure rootLogger. Examples: + 'com.google = FATAL', 'root = INFO', + 'org.apache = DEBUG' + """ + class Level(proto.Enum): + r"""The Log4j level for job execution. When running an `Apache + Hive `__ job, Cloud Dataproc configures the + Hive client to an equivalent verbosity level. + """ + LEVEL_UNSPECIFIED = 0 + ALL = 1 + TRACE = 2 + DEBUG = 3 + INFO = 4 + WARN = 5 + ERROR = 6 + FATAL = 7 + OFF = 8 + + driver_log_levels = proto.MapField( + proto.STRING, + proto.ENUM, + number=2, + enum=Level, + ) + + +class HadoopJob(proto.Message): + r"""A Dataproc job for running `Apache Hadoop + MapReduce `__ + jobs on `Apache Hadoop + YARN `__. + + Attributes: + main_jar_file_uri (str): + The HCFS URI of the jar file containing the + main class. Examples: + 'gs://foo-bucket/analytics-binaries/extract- + useful-metrics-mr.jar' 'hdfs:/tmp/test- + samples/custom-wordcount.jar' + 'file:///home/usr/lib/hadoop-mapreduce/hadoop- + mapreduce-examples.jar' + main_class (str): + The name of the driver's main class. The jar file containing + the class must be in the default CLASSPATH or specified in + ``jar_file_uris``. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``-libjars`` or ``-Dfoo=bar``, + that can be set as job properties, since a collision may + occur that causes an incorrect job submission. + jar_file_uris (Sequence[str]): + Optional. Jar file URIs to add to the + CLASSPATHs of the Hadoop driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS (Hadoop Compatible Filesystem) + URIs of files to be copied to the working + directory of Hadoop drivers and distributed + tasks. Useful for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted in the working directory of Hadoop + drivers and tasks. Supported file types: .jar, + .tar, .tar.gz, .tgz, or .zip. + properties (Sequence[google.cloud.dataproc_v1.types.HadoopJob.PropertiesEntry]): + Optional. A mapping of property names to values, used to + configure Hadoop. Properties that conflict with values set + by the Dataproc API may be overwritten. 
Can include + properties set in /etc/hadoop/conf/*-site and classes in + user code. + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_jar_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='driver', + ) + main_class = proto.Field( + proto.STRING, + number=2, + oneof='driver', + ) + args = proto.RepeatedField( + proto.STRING, + number=3, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=4, + ) + file_uris = proto.RepeatedField( + proto.STRING, + number=5, + ) + archive_uris = proto.RepeatedField( + proto.STRING, + number=6, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=8, + message='LoggingConfig', + ) + + +class SparkJob(proto.Message): + r"""A Dataproc job for running `Apache + Spark `__ applications on YARN. + + Attributes: + main_jar_file_uri (str): + The HCFS URI of the jar file that contains + the main class. + main_class (str): + The name of the driver's main class. The jar file that + contains the class must be in the default CLASSPATH or + specified in ``jar_file_uris``. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATHs of the Spark driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be placed in + the working directory of each executor. Useful + for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. + properties (Sequence[google.cloud.dataproc_v1.types.SparkJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure Spark. Properties that + conflict with values set by the Dataproc API may + be overwritten. Can include properties set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_jar_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='driver', + ) + main_class = proto.Field( + proto.STRING, + number=2, + oneof='driver', + ) + args = proto.RepeatedField( + proto.STRING, + number=3, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=4, + ) + file_uris = proto.RepeatedField( + proto.STRING, + number=5, + ) + archive_uris = proto.RepeatedField( + proto.STRING, + number=6, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=8, + message='LoggingConfig', + ) + + +class PySparkJob(proto.Message): + r"""A Dataproc job for running `Apache + PySpark `__ + applications on YARN. + + Attributes: + main_python_file_uri (str): + Required. The HCFS URI of the main Python + file to use as the driver. Must be a .py file. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + python_file_uris (Sequence[str]): + Optional. 
HCFS file URIs of Python files to + pass to the PySpark framework. Supported file + types: .py, .egg, and .zip. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATHs of the Python driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be placed in + the working directory of each executor. Useful + for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. + properties (Sequence[google.cloud.dataproc_v1.types.PySparkJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure PySpark. Properties + that conflict with values set by the Dataproc + API may be overwritten. Can include properties + set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_python_file_uri = proto.Field( + proto.STRING, + number=1, + ) + args = proto.RepeatedField( + proto.STRING, + number=2, + ) + python_file_uris = proto.RepeatedField( + proto.STRING, + number=3, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=4, + ) + file_uris = proto.RepeatedField( + proto.STRING, + number=5, + ) + archive_uris = proto.RepeatedField( + proto.STRING, + number=6, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=8, + message='LoggingConfig', + ) + + +class QueryList(proto.Message): + r"""A list of queries to run on a cluster. + Attributes: + queries (Sequence[str]): + Required. The queries to execute. You do not need to end a + query expression with a semicolon. Multiple queries can be + specified in one string by separating each with a semicolon. + Here is an example of a Dataproc API snippet that uses a + QueryList to specify a HiveJob: + + :: + + "hiveJob": { + "queryList": { + "queries": [ + "query1", + "query2", + "query3;query4", + ] + } + } + """ + + queries = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class HiveJob(proto.Message): + r"""A Dataproc job for running `Apache + Hive `__ queries on YARN. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains Hive + queries. + query_list (google.cloud.dataproc_v1.types.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + script_variables (Sequence[google.cloud.dataproc_v1.types.HiveJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Hive command: ``SET name="value";``). + properties (Sequence[google.cloud.dataproc_v1.types.HiveJob.PropertiesEntry]): + Optional. A mapping of property names and values, used to + configure Hive. Properties that conflict with values set by + the Dataproc API may be overwritten. Can include properties + set in /etc/hadoop/conf/*-site.xml, + /etc/hive/conf/hive-site.xml, and classes in user code. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATH of the Hive server and Hadoop + MapReduce (MR) tasks. Can contain Hive SerDes + and UDFs. 
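A minimal sketch of the ``QueryList`` form of a HiveJob described above; the queries and the script variable are placeholders::

    from google.cloud import dataproc_v1

    hive_job = dataproc_v1.HiveJob(
        query_list=dataproc_v1.QueryList(
            queries=["SHOW DATABASES;", "SHOW TABLES IN default;"],
        ),
        continue_on_failure=False,
        # Equivalent to `SET env="staging";` in the Hive script.
        script_variables={"env": "staging"},
    )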
+ """ + + query_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='queries', + ) + query_list = proto.Field( + proto.MESSAGE, + number=2, + oneof='queries', + message='QueryList', + ) + continue_on_failure = proto.Field( + proto.BOOL, + number=3, + ) + script_variables = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=6, + ) + + +class SparkSqlJob(proto.Message): + r"""A Dataproc job for running `Apache Spark + SQL `__ queries. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains SQL + queries. + query_list (google.cloud.dataproc_v1.types.QueryList): + A list of queries. + script_variables (Sequence[google.cloud.dataproc_v1.types.SparkSqlJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Spark SQL command: SET + ``name="value";``). + properties (Sequence[google.cloud.dataproc_v1.types.SparkSqlJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure Spark SQL's SparkConf. + Properties that conflict with values set by the + Dataproc API may be overwritten. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to be added + to the Spark CLASSPATH. + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + query_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='queries', + ) + query_list = proto.Field( + proto.MESSAGE, + number=2, + oneof='queries', + message='QueryList', + ) + script_variables = proto.MapField( + proto.STRING, + proto.STRING, + number=3, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=56, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=6, + message='LoggingConfig', + ) + + +class PigJob(proto.Message): + r"""A Dataproc job for running `Apache Pig `__ + queries on YARN. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains the + Pig queries. + query_list (google.cloud.dataproc_v1.types.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + script_variables (Sequence[google.cloud.dataproc_v1.types.PigJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Pig command: ``name=[value]``). + properties (Sequence[google.cloud.dataproc_v1.types.PigJob.PropertiesEntry]): + Optional. A mapping of property names to values, used to + configure Pig. Properties that conflict with values set by + the Dataproc API may be overwritten. Can include properties + set in /etc/hadoop/conf/*-site.xml, + /etc/pig/conf/pig.properties, and classes in user code. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATH of the Pig Client and Hadoop + MapReduce (MR) tasks. Can contain Pig UDFs. + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): + Optional. The runtime log config for job + execution. 
+ """ + + query_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='queries', + ) + query_list = proto.Field( + proto.MESSAGE, + number=2, + oneof='queries', + message='QueryList', + ) + continue_on_failure = proto.Field( + proto.BOOL, + number=3, + ) + script_variables = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=6, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=7, + message='LoggingConfig', + ) + + +class SparkRJob(proto.Message): + r"""A Dataproc job for running `Apache + SparkR `__ + applications on YARN. + + Attributes: + main_r_file_uri (str): + Required. The HCFS URI of the main R file to + use as the driver. Must be a .R file. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be placed in + the working directory of each executor. Useful + for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. + properties (Sequence[google.cloud.dataproc_v1.types.SparkRJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure SparkR. Properties + that conflict with values set by the Dataproc + API may be overwritten. Can include properties + set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_r_file_uri = proto.Field( + proto.STRING, + number=1, + ) + args = proto.RepeatedField( + proto.STRING, + number=2, + ) + file_uris = proto.RepeatedField( + proto.STRING, + number=3, + ) + archive_uris = proto.RepeatedField( + proto.STRING, + number=4, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=6, + message='LoggingConfig', + ) + + +class PrestoJob(proto.Message): + r"""A Dataproc job for running `Presto `__ + queries. **IMPORTANT**: The `Dataproc Presto Optional + Component `__ + must be enabled when the cluster is created to submit a Presto job + to the cluster. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains SQL + queries. + query_list (google.cloud.dataproc_v1.types.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + output_format (str): + Optional. The format in which query output + will be displayed. See the Presto documentation + for supported output formats + client_tags (Sequence[str]): + Optional. Presto client tags to attach to + this query + properties (Sequence[google.cloud.dataproc_v1.types.PrestoJob.PropertiesEntry]): + Optional. A mapping of property names to values. Used to set + Presto `session + properties `__ + Equivalent to using the --session flag in the Presto CLI + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): + Optional. The runtime log config for job + execution. 
+ """ + + query_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='queries', + ) + query_list = proto.Field( + proto.MESSAGE, + number=2, + oneof='queries', + message='QueryList', + ) + continue_on_failure = proto.Field( + proto.BOOL, + number=3, + ) + output_format = proto.Field( + proto.STRING, + number=4, + ) + client_tags = proto.RepeatedField( + proto.STRING, + number=5, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=7, + message='LoggingConfig', + ) + + +class JobPlacement(proto.Message): + r"""Dataproc job config. + Attributes: + cluster_name (str): + Required. The name of the cluster where the + job will be submitted. + cluster_uuid (str): + Output only. A cluster UUID generated by the + Dataproc service when the job is submitted. + cluster_labels (Sequence[google.cloud.dataproc_v1.types.JobPlacement.ClusterLabelsEntry]): + Optional. Cluster labels to identify a + cluster where the job will be submitted. + """ + + cluster_name = proto.Field( + proto.STRING, + number=1, + ) + cluster_uuid = proto.Field( + proto.STRING, + number=2, + ) + cluster_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=3, + ) + + +class JobStatus(proto.Message): + r"""Dataproc job status. + Attributes: + state (google.cloud.dataproc_v1.types.JobStatus.State): + Output only. A state message specifying the + overall job state. + details (str): + Optional. Output only. Job state details, + such as an error description if the state is + ERROR. + state_start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when this state was + entered. + substate (google.cloud.dataproc_v1.types.JobStatus.Substate): + Output only. Additional state information, + which includes status reported by the agent. + """ + class State(proto.Enum): + r"""The job state.""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + SETUP_DONE = 8 + RUNNING = 2 + CANCEL_PENDING = 3 + CANCEL_STARTED = 7 + CANCELLED = 4 + DONE = 5 + ERROR = 6 + ATTEMPT_FAILURE = 9 + + class Substate(proto.Enum): + r"""The job substate.""" + UNSPECIFIED = 0 + SUBMITTED = 1 + QUEUED = 2 + STALE_STATUS = 3 + + state = proto.Field( + proto.ENUM, + number=1, + enum=State, + ) + details = proto.Field( + proto.STRING, + number=2, + ) + state_start_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + substate = proto.Field( + proto.ENUM, + number=7, + enum=Substate, + ) + + +class JobReference(proto.Message): + r"""Encapsulates the full scoping used to reference a job. + Attributes: + project_id (str): + Optional. The ID of the Google Cloud Platform + project that the job belongs to. If specified, + must match the request project ID. + job_id (str): + Optional. The job ID, which must be unique within the + project. + + The ID must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), or hyphens (-). The maximum length is 100 + characters. + + If not specified by the caller, the job ID will be provided + by the server. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + job_id = proto.Field( + proto.STRING, + number=2, + ) + + +class YarnApplication(proto.Message): + r"""A YARN application created by a job. Application information is a + subset of + org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. + + **Beta Feature**: This report is available for testing purposes + only. It may be changed before final release. + + Attributes: + name (str): + Required. 
The application name. + state (google.cloud.dataproc_v1.types.YarnApplication.State): + Required. The application state. + progress (float): + Required. The numerical progress of the + application, from 1 to 100. + tracking_url (str): + Optional. The HTTP URL of the + ApplicationMaster, HistoryServer, or + TimelineServer that provides application- + specific information. The URL uses the internal + hostname, and requires a proxy server for + resolution and, possibly, access. + """ + class State(proto.Enum): + r"""The application state, corresponding to + YarnProtos.YarnApplicationStateProto. + """ + STATE_UNSPECIFIED = 0 + NEW = 1 + NEW_SAVING = 2 + SUBMITTED = 3 + ACCEPTED = 4 + RUNNING = 5 + FINISHED = 6 + FAILED = 7 + KILLED = 8 + + name = proto.Field( + proto.STRING, + number=1, + ) + state = proto.Field( + proto.ENUM, + number=2, + enum=State, + ) + progress = proto.Field( + proto.FLOAT, + number=3, + ) + tracking_url = proto.Field( + proto.STRING, + number=4, + ) + + +class Job(proto.Message): + r"""A Dataproc job resource. + Attributes: + reference (google.cloud.dataproc_v1.types.JobReference): + Optional. The fully qualified reference to the job, which + can be used to obtain the equivalent REST path of the job + resource. If this property is not specified when a job is + created, the server generates a job_id. + placement (google.cloud.dataproc_v1.types.JobPlacement): + Required. Job information, including how, + when, and where to run the job. + hadoop_job (google.cloud.dataproc_v1.types.HadoopJob): + Optional. Job is a Hadoop job. + spark_job (google.cloud.dataproc_v1.types.SparkJob): + Optional. Job is a Spark job. + pyspark_job (google.cloud.dataproc_v1.types.PySparkJob): + Optional. Job is a PySpark job. + hive_job (google.cloud.dataproc_v1.types.HiveJob): + Optional. Job is a Hive job. + pig_job (google.cloud.dataproc_v1.types.PigJob): + Optional. Job is a Pig job. + spark_r_job (google.cloud.dataproc_v1.types.SparkRJob): + Optional. Job is a SparkR job. + spark_sql_job (google.cloud.dataproc_v1.types.SparkSqlJob): + Optional. Job is a SparkSql job. + presto_job (google.cloud.dataproc_v1.types.PrestoJob): + Optional. Job is a Presto job. + status (google.cloud.dataproc_v1.types.JobStatus): + Output only. The job status. Additional application-specific + status information may be contained in the type_job and + yarn_applications fields. + status_history (Sequence[google.cloud.dataproc_v1.types.JobStatus]): + Output only. The previous job status. + yarn_applications (Sequence[google.cloud.dataproc_v1.types.YarnApplication]): + Output only. The collection of YARN applications spun up by + this job. + + **Beta** Feature: This report is available for testing + purposes only. It may be changed before final release. + driver_output_resource_uri (str): + Output only. A URI pointing to the location + of the stdout of the job's driver program. + driver_control_files_uri (str): + Output only. If present, the location of miscellaneous + control files which may be used as part of job setup and + handling. If not present, control files may be placed in the + same location as ``driver_output_uri``. + labels (Sequence[google.cloud.dataproc_v1.types.Job.LabelsEntry]): + Optional. The labels to associate with this job. Label + **keys** must contain 1 to 63 characters, and must conform + to `RFC 1035 `__. + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. No more than + 32 labels can be associated with a job. 
+ scheduling (google.cloud.dataproc_v1.types.JobScheduling): + Optional. Job scheduling configuration. + job_uuid (str): + Output only. A UUID that uniquely identifies a job within + the project over time. This is in contrast to a + user-settable reference.job_id that may be reused over time. + done (bool): + Output only. Indicates whether the job is completed. If the + value is ``false``, the job is still in progress. If + ``true``, the job is completed, and ``status.state`` field + will indicate if it was successful, failed, or cancelled. + """ + + reference = proto.Field( + proto.MESSAGE, + number=1, + message='JobReference', + ) + placement = proto.Field( + proto.MESSAGE, + number=2, + message='JobPlacement', + ) + hadoop_job = proto.Field( + proto.MESSAGE, + number=3, + oneof='type_job', + message='HadoopJob', + ) + spark_job = proto.Field( + proto.MESSAGE, + number=4, + oneof='type_job', + message='SparkJob', + ) + pyspark_job = proto.Field( + proto.MESSAGE, + number=5, + oneof='type_job', + message='PySparkJob', + ) + hive_job = proto.Field( + proto.MESSAGE, + number=6, + oneof='type_job', + message='HiveJob', + ) + pig_job = proto.Field( + proto.MESSAGE, + number=7, + oneof='type_job', + message='PigJob', + ) + spark_r_job = proto.Field( + proto.MESSAGE, + number=21, + oneof='type_job', + message='SparkRJob', + ) + spark_sql_job = proto.Field( + proto.MESSAGE, + number=12, + oneof='type_job', + message='SparkSqlJob', + ) + presto_job = proto.Field( + proto.MESSAGE, + number=23, + oneof='type_job', + message='PrestoJob', + ) + status = proto.Field( + proto.MESSAGE, + number=8, + message='JobStatus', + ) + status_history = proto.RepeatedField( + proto.MESSAGE, + number=13, + message='JobStatus', + ) + yarn_applications = proto.RepeatedField( + proto.MESSAGE, + number=9, + message='YarnApplication', + ) + driver_output_resource_uri = proto.Field( + proto.STRING, + number=17, + ) + driver_control_files_uri = proto.Field( + proto.STRING, + number=15, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=18, + ) + scheduling = proto.Field( + proto.MESSAGE, + number=20, + message='JobScheduling', + ) + job_uuid = proto.Field( + proto.STRING, + number=22, + ) + done = proto.Field( + proto.BOOL, + number=24, + ) + + +class JobScheduling(proto.Message): + r"""Job scheduling options. + Attributes: + max_failures_per_hour (int): + Optional. Maximum number of times per hour a + driver may be restarted as a result of driver + exiting with non-zero code before job is + reported failed. + + A job may be reported as thrashing if driver + exits with non-zero code 4 times within 10 + minute window. + + Maximum value is 10. + max_failures_total (int): + Optional. Maximum number of times in total a + driver may be restarted as a result of driver + exiting with non-zero code before job is + reported failed. Maximum value is 240. + """ + + max_failures_per_hour = proto.Field( + proto.INT32, + number=1, + ) + max_failures_total = proto.Field( + proto.INT32, + number=2, + ) + + +class SubmitJobRequest(proto.Message): + r"""A request to submit a job. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job (google.cloud.dataproc_v1.types.Job): + Required. The job resource. + request_id (str): + Optional. A unique id used to identify the request. 
If the + server receives two + `SubmitJobRequest `__\ s + with the same id, then the second request will be ignored + and the first [Job][google.cloud.dataproc.v1.Job] created + and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + job = proto.Field( + proto.MESSAGE, + number=2, + message='Job', + ) + request_id = proto.Field( + proto.STRING, + number=4, + ) + + +class JobMetadata(proto.Message): + r"""Job Operation metadata. + Attributes: + job_id (str): + Output only. The job id. + status (google.cloud.dataproc_v1.types.JobStatus): + Output only. Most recent job status. + operation_type (str): + Output only. Operation type. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Job submission time. + """ + + job_id = proto.Field( + proto.STRING, + number=1, + ) + status = proto.Field( + proto.MESSAGE, + number=2, + message='JobStatus', + ) + operation_type = proto.Field( + proto.STRING, + number=3, + ) + start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class GetJobRequest(proto.Message): + r"""A request to get the resource representation for a job in a + project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + job_id = proto.Field( + proto.STRING, + number=2, + ) + + +class ListJobsRequest(proto.Message): + r"""A request to list jobs in a project. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + page_size (int): + Optional. The number of results to return in + each response. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + cluster_name (str): + Optional. If set, the returned jobs list + includes only jobs that were submitted to the + named cluster. + job_state_matcher (google.cloud.dataproc_v1.types.ListJobsRequest.JobStateMatcher): + Optional. Specifies enumerated categories of jobs to list. + (default = match ALL jobs). + + If ``filter`` is provided, ``jobStateMatcher`` will be + ignored. + filter (str): + Optional. A filter constraining the jobs to list. Filters + are case-sensitive and have the following syntax: + + [field = value] AND [field [= value]] ... + + where **field** is ``status.state`` or ``labels.[KEY]``, and + ``[KEY]`` is a label key. **value** can be ``*`` to match + all values. ``status.state`` can be either ``ACTIVE`` or + ``NON_ACTIVE``. Only the logical ``AND`` operator is + supported; space-separated items are treated as having an + implicit ``AND`` operator. 
+ + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + """ + class JobStateMatcher(proto.Enum): + r"""A matcher that specifies categories of job states.""" + ALL = 0 + ACTIVE = 1 + NON_ACTIVE = 2 + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=6, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + cluster_name = proto.Field( + proto.STRING, + number=4, + ) + job_state_matcher = proto.Field( + proto.ENUM, + number=5, + enum=JobStateMatcher, + ) + filter = proto.Field( + proto.STRING, + number=7, + ) + + +class UpdateJobRequest(proto.Message): + r"""A request to update a job. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + job (google.cloud.dataproc_v1.types.Job): + Required. The changes to the job. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Specifies the path, relative to Job, of the field + to update. For example, to update the labels of a Job the + update_mask parameter would be specified as labels, and the + ``PATCH`` request body would specify the new value. Note: + Currently, labels is the only field that can be updated. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=2, + ) + job_id = proto.Field( + proto.STRING, + number=3, + ) + job = proto.Field( + proto.MESSAGE, + number=4, + message='Job', + ) + update_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListJobsResponse(proto.Message): + r"""A list of jobs in a project. + Attributes: + jobs (Sequence[google.cloud.dataproc_v1.types.Job]): + Output only. Jobs list. + next_page_token (str): + Optional. This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the ``page_token`` in a subsequent + ListJobsRequest. + """ + + @property + def raw_page(self): + return self + + jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Job', + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class CancelJobRequest(proto.Message): + r"""A request to cancel a job. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + job_id = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteJobRequest(proto.Message): + r"""A request to delete a job. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. 
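A minimal sketch of the job-listing options described above; because the docstring notes that ``filter`` takes precedence over ``jobStateMatcher``, only the enum is set here, and the project, region, and cluster names are placeholders::

    from google.cloud import dataproc_v1

    request = dataproc_v1.ListJobsRequest(
        project_id="my-project",
        region="us-central1",
        cluster_name="my-cluster",
        job_state_matcher=dataproc_v1.ListJobsRequest.JobStateMatcher.ACTIVE,
    )
    # jobs = dataproc_v1.JobControllerClient().list_jobs(request=request)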
+ """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + job_id = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/operations.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/operations.py new file mode 100644 index 00000000..e3894a2a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/operations.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1', + manifest={ + 'ClusterOperationStatus', + 'ClusterOperationMetadata', + }, +) + + +class ClusterOperationStatus(proto.Message): + r"""The status of the operation. + Attributes: + state (google.cloud.dataproc_v1.types.ClusterOperationStatus.State): + Output only. A message containing the + operation state. + inner_state (str): + Output only. A message containing the + detailed operation state. + details (str): + Output only. A message containing any + operation metadata details. + state_start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time this state was entered. + """ + class State(proto.Enum): + r"""The operation state.""" + UNKNOWN = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + + state = proto.Field( + proto.ENUM, + number=1, + enum=State, + ) + inner_state = proto.Field( + proto.STRING, + number=2, + ) + details = proto.Field( + proto.STRING, + number=3, + ) + state_start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class ClusterOperationMetadata(proto.Message): + r"""Metadata describing the operation. + Attributes: + cluster_name (str): + Output only. Name of the cluster for the + operation. + cluster_uuid (str): + Output only. Cluster UUID for the operation. + status (google.cloud.dataproc_v1.types.ClusterOperationStatus): + Output only. Current operation status. + status_history (Sequence[google.cloud.dataproc_v1.types.ClusterOperationStatus]): + Output only. The previous operation status. + operation_type (str): + Output only. The operation type. + description (str): + Output only. Short description of operation. + labels (Sequence[google.cloud.dataproc_v1.types.ClusterOperationMetadata.LabelsEntry]): + Output only. Labels associated with the + operation + warnings (Sequence[str]): + Output only. Errors encountered during + operation execution. 
+ """ + + cluster_name = proto.Field( + proto.STRING, + number=7, + ) + cluster_uuid = proto.Field( + proto.STRING, + number=8, + ) + status = proto.Field( + proto.MESSAGE, + number=9, + message='ClusterOperationStatus', + ) + status_history = proto.RepeatedField( + proto.MESSAGE, + number=10, + message='ClusterOperationStatus', + ) + operation_type = proto.Field( + proto.STRING, + number=11, + ) + description = proto.Field( + proto.STRING, + number=12, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=13, + ) + warnings = proto.RepeatedField( + proto.STRING, + number=14, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/shared.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/shared.py new file mode 100644 index 00000000..69371c0d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/shared.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1', + manifest={ + 'Component', + }, +) + + +class Component(proto.Enum): + r"""Cluster components that can be activated. + Next ID: 16. + """ + COMPONENT_UNSPECIFIED = 0 + ANACONDA = 5 + DOCKER = 13 + DRUID = 9 + FLINK = 14 + HBASE = 11 + HIVE_WEBHCAT = 3 + JUPYTER = 1 + PRESTO = 6 + RANGER = 12 + SOLR = 10 + ZEPPELIN = 4 + ZOOKEEPER = 8 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/workflow_templates.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/workflow_templates.py new file mode 100644 index 00000000..1c23ff3f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/workflow_templates.py @@ -0,0 +1,1050 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import jobs as gcd_jobs +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1', + manifest={ + 'WorkflowTemplate', + 'WorkflowTemplatePlacement', + 'ManagedCluster', + 'ClusterSelector', + 'OrderedJob', + 'TemplateParameter', + 'ParameterValidation', + 'RegexValidation', + 'ValueValidation', + 'WorkflowMetadata', + 'ClusterOperation', + 'WorkflowGraph', + 'WorkflowNode', + 'CreateWorkflowTemplateRequest', + 'GetWorkflowTemplateRequest', + 'InstantiateWorkflowTemplateRequest', + 'InstantiateInlineWorkflowTemplateRequest', + 'UpdateWorkflowTemplateRequest', + 'ListWorkflowTemplatesRequest', + 'ListWorkflowTemplatesResponse', + 'DeleteWorkflowTemplateRequest', + }, +) + + +class WorkflowTemplate(proto.Message): + r"""A Dataproc workflow template resource. + Attributes: + id (str): + + name (str): + Output only. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates``, the resource + name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. Used to perform a consistent read-modify-write. + + This field should be left blank for a + ``CreateWorkflowTemplate`` request. It is required for an + ``UpdateWorkflowTemplate`` request, and must match the + current server version. A typical update template flow would + fetch the current template with a ``GetWorkflowTemplate`` + request, which will return the current template with the + ``version`` field filled in with the current server version. + The user updates other fields in the template, then returns + it as part of the ``UpdateWorkflowTemplate`` request. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time template was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time template was last + updated. + labels (Sequence[google.cloud.dataproc_v1.types.WorkflowTemplate.LabelsEntry]): + Optional. The labels to associate with this template. These + labels will be propagated to all jobs and clusters created + by the workflow instance. + + Label **keys** must contain 1 to 63 characters, and must + conform to `RFC + 1035 `__. + + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. + + No more than 32 labels can be associated with a template. + placement (google.cloud.dataproc_v1.types.WorkflowTemplatePlacement): + Required. WorkflowTemplate scheduling + information. + jobs (Sequence[google.cloud.dataproc_v1.types.OrderedJob]): + Required. The Directed Acyclic Graph of Jobs + to submit. + parameters (Sequence[google.cloud.dataproc_v1.types.TemplateParameter]): + Optional. Template parameters whose values + are substituted into the template. Values for + parameters must be provided when the template is + instantiated. + dag_timeout (google.protobuf.duration_pb2.Duration): + Optional. 
Timeout duration for the DAG of jobs, expressed in + seconds (see `JSON representation of + duration `__). + The timeout duration must be from 10 minutes ("600s") to 24 + hours ("86400s"). The timer begins when the first job is + submitted. If the workflow is running at the end of the + timeout period, any remaining jobs are cancelled, the + workflow is ended, and if the workflow was running on a + `managed + cluster `__, + the cluster is deleted. + """ + + id = proto.Field( + proto.STRING, + number=2, + ) + name = proto.Field( + proto.STRING, + number=1, + ) + version = proto.Field( + proto.INT32, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + placement = proto.Field( + proto.MESSAGE, + number=7, + message='WorkflowTemplatePlacement', + ) + jobs = proto.RepeatedField( + proto.MESSAGE, + number=8, + message='OrderedJob', + ) + parameters = proto.RepeatedField( + proto.MESSAGE, + number=9, + message='TemplateParameter', + ) + dag_timeout = proto.Field( + proto.MESSAGE, + number=10, + message=duration_pb2.Duration, + ) + + +class WorkflowTemplatePlacement(proto.Message): + r"""Specifies workflow execution target. + + Either ``managed_cluster`` or ``cluster_selector`` is required. + + Attributes: + managed_cluster (google.cloud.dataproc_v1.types.ManagedCluster): + A cluster that is managed by the workflow. + cluster_selector (google.cloud.dataproc_v1.types.ClusterSelector): + Optional. A selector that chooses target + cluster for jobs based on metadata. + + The selector is evaluated at the time each job + is submitted. + """ + + managed_cluster = proto.Field( + proto.MESSAGE, + number=1, + oneof='placement', + message='ManagedCluster', + ) + cluster_selector = proto.Field( + proto.MESSAGE, + number=2, + oneof='placement', + message='ClusterSelector', + ) + + +class ManagedCluster(proto.Message): + r"""Cluster that is managed by the workflow. + Attributes: + cluster_name (str): + Required. The cluster name prefix. A unique + cluster name will be formed by appending a + random suffix. + The name must contain only lower-case letters + (a-z), numbers (0-9), and hyphens (-). Must + begin with a letter. Cannot begin or end with + hyphen. Must consist of between 2 and 35 + characters. + config (google.cloud.dataproc_v1.types.ClusterConfig): + Required. The cluster configuration. + labels (Sequence[google.cloud.dataproc_v1.types.ManagedCluster.LabelsEntry]): + Optional. The labels to associate with this cluster. + + Label keys must be between 1 and 63 characters long, and + must conform to the following PCRE regular expression: + [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values must be between 1 and 63 characters long, and + must conform to the following PCRE regular expression: + [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 32 labels can be associated with a given + cluster. + """ + + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + config = proto.Field( + proto.MESSAGE, + number=3, + message=clusters.ClusterConfig, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + + +class ClusterSelector(proto.Message): + r"""A selector that chooses target cluster for jobs based on + metadata. + + Attributes: + zone (str): + Optional. The zone where workflow process + executes. 
This parameter does not affect the + selection of the cluster. + If unspecified, the zone of the first cluster + matching the selector is used. + cluster_labels (Sequence[google.cloud.dataproc_v1.types.ClusterSelector.ClusterLabelsEntry]): + Required. The cluster labels. Cluster must + have all labels to match. + """ + + zone = proto.Field( + proto.STRING, + number=1, + ) + cluster_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + + +class OrderedJob(proto.Message): + r"""A job executed by the workflow. + Attributes: + step_id (str): + Required. The step id. The id must be unique among all jobs + within the template. + + The step id is used as prefix for job id, as job + ``goog-dataproc-workflow-step-id`` label, and in + [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + field from other steps. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of between 3 and 50 + characters. + hadoop_job (google.cloud.dataproc_v1.types.HadoopJob): + Optional. Job is a Hadoop job. + spark_job (google.cloud.dataproc_v1.types.SparkJob): + Optional. Job is a Spark job. + pyspark_job (google.cloud.dataproc_v1.types.PySparkJob): + Optional. Job is a PySpark job. + hive_job (google.cloud.dataproc_v1.types.HiveJob): + Optional. Job is a Hive job. + pig_job (google.cloud.dataproc_v1.types.PigJob): + Optional. Job is a Pig job. + spark_r_job (google.cloud.dataproc_v1.types.SparkRJob): + Optional. Job is a SparkR job. + spark_sql_job (google.cloud.dataproc_v1.types.SparkSqlJob): + Optional. Job is a SparkSql job. + presto_job (google.cloud.dataproc_v1.types.PrestoJob): + Optional. Job is a Presto job. + labels (Sequence[google.cloud.dataproc_v1.types.OrderedJob.LabelsEntry]): + Optional. The labels to associate with this job. + + Label keys must be between 1 and 63 characters long, and + must conform to the following regular expression: + [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values must be between 1 and 63 characters long, and + must conform to the following regular expression: + [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 32 labels can be associated with a given job. + scheduling (google.cloud.dataproc_v1.types.JobScheduling): + Optional. Job scheduling configuration. + prerequisite_step_ids (Sequence[str]): + Optional. The optional list of prerequisite job step_ids. If + not specified, the job will start at the beginning of + workflow. 
+ """ + + step_id = proto.Field( + proto.STRING, + number=1, + ) + hadoop_job = proto.Field( + proto.MESSAGE, + number=2, + oneof='job_type', + message=gcd_jobs.HadoopJob, + ) + spark_job = proto.Field( + proto.MESSAGE, + number=3, + oneof='job_type', + message=gcd_jobs.SparkJob, + ) + pyspark_job = proto.Field( + proto.MESSAGE, + number=4, + oneof='job_type', + message=gcd_jobs.PySparkJob, + ) + hive_job = proto.Field( + proto.MESSAGE, + number=5, + oneof='job_type', + message=gcd_jobs.HiveJob, + ) + pig_job = proto.Field( + proto.MESSAGE, + number=6, + oneof='job_type', + message=gcd_jobs.PigJob, + ) + spark_r_job = proto.Field( + proto.MESSAGE, + number=11, + oneof='job_type', + message=gcd_jobs.SparkRJob, + ) + spark_sql_job = proto.Field( + proto.MESSAGE, + number=7, + oneof='job_type', + message=gcd_jobs.SparkSqlJob, + ) + presto_job = proto.Field( + proto.MESSAGE, + number=12, + oneof='job_type', + message=gcd_jobs.PrestoJob, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + scheduling = proto.Field( + proto.MESSAGE, + number=9, + message=gcd_jobs.JobScheduling, + ) + prerequisite_step_ids = proto.RepeatedField( + proto.STRING, + number=10, + ) + + +class TemplateParameter(proto.Message): + r"""A configurable parameter that replaces one or more fields in + the template. Parameterizable fields: + - Labels + - File uris + - Job properties + - Job arguments + - Script variables + - Main class (in HadoopJob and SparkJob) + - Zone (in ClusterSelector) + + Attributes: + name (str): + Required. Parameter name. The parameter name is used as the + key, and paired with the parameter value, which are passed + to the template when the template is instantiated. The name + must contain only capital letters (A-Z), numbers (0-9), and + underscores (_), and must not start with a number. The + maximum length is 40 characters. + fields (Sequence[str]): + Required. Paths to all fields that the parameter replaces. A + field is allowed to appear in at most one parameter's list + of field paths. + + A field path is similar in syntax to a + [google.protobuf.FieldMask][google.protobuf.FieldMask]. For + example, a field path that references the zone field of a + workflow template's cluster selector would be specified as + ``placement.clusterSelector.zone``. + + Also, field paths can reference fields using the following + syntax: + + - Values in maps can be referenced by key: + + - labels['key'] + - placement.clusterSelector.clusterLabels['key'] + - placement.managedCluster.labels['key'] + - placement.clusterSelector.clusterLabels['key'] + - jobs['step-id'].labels['key'] + + - Jobs in the jobs list can be referenced by step-id: + + - jobs['step-id'].hadoopJob.mainJarFileUri + - jobs['step-id'].hiveJob.queryFileUri + - jobs['step-id'].pySparkJob.mainPythonFileUri + - jobs['step-id'].hadoopJob.jarFileUris[0] + - jobs['step-id'].hadoopJob.archiveUris[0] + - jobs['step-id'].hadoopJob.fileUris[0] + - jobs['step-id'].pySparkJob.pythonFileUris[0] + + - Items in repeated fields can be referenced by a + zero-based index: + + - jobs['step-id'].sparkJob.args[0] + + - Other examples: + + - jobs['step-id'].hadoopJob.properties['key'] + - jobs['step-id'].hadoopJob.args[0] + - jobs['step-id'].hiveJob.scriptVariables['key'] + - jobs['step-id'].hadoopJob.mainJarFileUri + - placement.clusterSelector.zone + + It may not be possible to parameterize maps and repeated + fields in their entirety since only individual map values + and individual items in repeated fields can be referenced. 
+ For example, the following field paths are invalid: + + - placement.clusterSelector.clusterLabels + - jobs['step-id'].sparkJob.args + description (str): + Optional. Brief description of the parameter. + Must not exceed 1024 characters. + validation (google.cloud.dataproc_v1.types.ParameterValidation): + Optional. Validation rules to be applied to + this parameter's value. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + fields = proto.RepeatedField( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + validation = proto.Field( + proto.MESSAGE, + number=4, + message='ParameterValidation', + ) + + +class ParameterValidation(proto.Message): + r"""Configuration for parameter validation. + Attributes: + regex (google.cloud.dataproc_v1.types.RegexValidation): + Validation based on regular expressions. + values (google.cloud.dataproc_v1.types.ValueValidation): + Validation based on a list of allowed values. + """ + + regex = proto.Field( + proto.MESSAGE, + number=1, + oneof='validation_type', + message='RegexValidation', + ) + values = proto.Field( + proto.MESSAGE, + number=2, + oneof='validation_type', + message='ValueValidation', + ) + + +class RegexValidation(proto.Message): + r"""Validation based on regular expressions. + Attributes: + regexes (Sequence[str]): + Required. RE2 regular expressions used to + validate the parameter's value. The value must + match the regex in its entirety (substring + matches are not sufficient). + """ + + regexes = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class ValueValidation(proto.Message): + r"""Validation based on a list of allowed values. + Attributes: + values (Sequence[str]): + Required. List of allowed values for the + parameter. + """ + + values = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class WorkflowMetadata(proto.Message): + r"""A Dataproc workflow template resource. + Attributes: + template (str): + Output only. The resource name of the workflow template as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates``, the resource + name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Output only. The version of template at the + time of workflow instantiation. + create_cluster (google.cloud.dataproc_v1.types.ClusterOperation): + Output only. The create cluster operation + metadata. + graph (google.cloud.dataproc_v1.types.WorkflowGraph): + Output only. The workflow graph. + delete_cluster (google.cloud.dataproc_v1.types.ClusterOperation): + Output only. The delete cluster operation + metadata. + state (google.cloud.dataproc_v1.types.WorkflowMetadata.State): + Output only. The workflow state. + cluster_name (str): + Output only. The name of the target cluster. + parameters (Sequence[google.cloud.dataproc_v1.types.WorkflowMetadata.ParametersEntry]): + Map from parameter names to values that were + used for those parameters. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Workflow start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Workflow end time. + cluster_uuid (str): + Output only. The UUID of target cluster. 
+ dag_timeout (google.protobuf.duration_pb2.Duration): + Output only. The timeout duration for the DAG of jobs, + expressed in seconds (see `JSON representation of + duration `__). + dag_start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. DAG start time, only set for workflows with + [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] + when DAG begins. + dag_end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. DAG end time, only set for workflows with + [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] + when DAG ends. + """ + class State(proto.Enum): + r"""The operation state.""" + UNKNOWN = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + + template = proto.Field( + proto.STRING, + number=1, + ) + version = proto.Field( + proto.INT32, + number=2, + ) + create_cluster = proto.Field( + proto.MESSAGE, + number=3, + message='ClusterOperation', + ) + graph = proto.Field( + proto.MESSAGE, + number=4, + message='WorkflowGraph', + ) + delete_cluster = proto.Field( + proto.MESSAGE, + number=5, + message='ClusterOperation', + ) + state = proto.Field( + proto.ENUM, + number=6, + enum=State, + ) + cluster_name = proto.Field( + proto.STRING, + number=7, + ) + parameters = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + start_time = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + cluster_uuid = proto.Field( + proto.STRING, + number=11, + ) + dag_timeout = proto.Field( + proto.MESSAGE, + number=12, + message=duration_pb2.Duration, + ) + dag_start_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + dag_end_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + + +class ClusterOperation(proto.Message): + r"""The cluster operation triggered by a workflow. + Attributes: + operation_id (str): + Output only. The id of the cluster operation. + error (str): + Output only. Error, if operation failed. + done (bool): + Output only. Indicates the operation is done. + """ + + operation_id = proto.Field( + proto.STRING, + number=1, + ) + error = proto.Field( + proto.STRING, + number=2, + ) + done = proto.Field( + proto.BOOL, + number=3, + ) + + +class WorkflowGraph(proto.Message): + r"""The workflow graph. + Attributes: + nodes (Sequence[google.cloud.dataproc_v1.types.WorkflowNode]): + Output only. The workflow nodes. + """ + + nodes = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='WorkflowNode', + ) + + +class WorkflowNode(proto.Message): + r"""The workflow node. + Attributes: + step_id (str): + Output only. The name of the node. + prerequisite_step_ids (Sequence[str]): + Output only. Node's prerequisite nodes. + job_id (str): + Output only. The job id; populated after the + node enters RUNNING state. + state (google.cloud.dataproc_v1.types.WorkflowNode.NodeState): + Output only. The node state. + error (str): + Output only. The error detail. 
+ """ + class NodeState(proto.Enum): + r"""The workflow node state.""" + NODE_STATE_UNSPECIFIED = 0 + BLOCKED = 1 + RUNNABLE = 2 + RUNNING = 3 + COMPLETED = 4 + FAILED = 5 + + step_id = proto.Field( + proto.STRING, + number=1, + ) + prerequisite_step_ids = proto.RepeatedField( + proto.STRING, + number=2, + ) + job_id = proto.Field( + proto.STRING, + number=3, + ) + state = proto.Field( + proto.ENUM, + number=5, + enum=NodeState, + ) + error = proto.Field( + proto.STRING, + number=6, + ) + + +class CreateWorkflowTemplateRequest(proto.Message): + r"""A request to create a workflow template. + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + template (google.cloud.dataproc_v1.types.WorkflowTemplate): + Required. The Dataproc workflow template to + create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + template = proto.Field( + proto.MESSAGE, + number=2, + message='WorkflowTemplate', + ) + + +class GetWorkflowTemplateRequest(proto.Message): + r"""A request to fetch a workflow template. + Attributes: + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + retrieve. Only previously instantiated versions + can be retrieved. + If unspecified, retrieves the current version. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + version = proto.Field( + proto.INT32, + number=2, + ) + + +class InstantiateWorkflowTemplateRequest(proto.Message): + r"""A request to instantiate a workflow template. + Attributes: + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + instantiate. If specified, the workflow will be + instantiated only if the current version of the + workflow template has the supplied version. + This option cannot be used to instantiate a + previous version of workflow template. + request_id (str): + Optional. A tag that prevents multiple concurrent workflow + instances with the same tag from running. This mitigates + risk of concurrent instances started due to retries. + + It is recommended to always set this value to a + `UUID `__. 
+ + The tag must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + parameters (Sequence[google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): + Optional. Map from parameter names to values + that should be used for those parameters. Values + may not exceed 1000 characters. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + version = proto.Field( + proto.INT32, + number=2, + ) + request_id = proto.Field( + proto.STRING, + number=5, + ) + parameters = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + + +class InstantiateInlineWorkflowTemplateRequest(proto.Message): + r"""A request to instantiate an inline workflow template. + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: ``projects/{project_id}/locations/{location}`` + template (google.cloud.dataproc_v1.types.WorkflowTemplate): + Required. The workflow template to + instantiate. + request_id (str): + Optional. A tag that prevents multiple concurrent workflow + instances with the same tag from running. This mitigates + risk of concurrent instances started due to retries. + + It is recommended to always set this value to a + `UUID `__. + + The tag must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + template = proto.Field( + proto.MESSAGE, + number=2, + message='WorkflowTemplate', + ) + request_id = proto.Field( + proto.STRING, + number=3, + ) + + +class UpdateWorkflowTemplateRequest(proto.Message): + r"""A request to update a workflow template. + Attributes: + template (google.cloud.dataproc_v1.types.WorkflowTemplate): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + """ + + template = proto.Field( + proto.MESSAGE, + number=1, + message='WorkflowTemplate', + ) + + +class ListWorkflowTemplatesRequest(proto.Message): + r"""A request to list workflow templates in a project. + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + page_size (int): + Optional. The maximum number of results to + return in each response. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + + +class ListWorkflowTemplatesResponse(proto.Message): + r"""A response to a request to list workflow templates in a + project. 
+ + Attributes: + templates (Sequence[google.cloud.dataproc_v1.types.WorkflowTemplate]): + Output only. WorkflowTemplates list. + next_page_token (str): + Output only. This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the page_token in a subsequent + ListWorkflowTemplatesRequest. + """ + + @property + def raw_page(self): + return self + + templates = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='WorkflowTemplate', + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteWorkflowTemplateRequest(proto.Message): + r"""A request to delete a workflow template. + Currently started workflows will remain running. + + Attributes: + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.delete``, the + resource name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + delete. If specified, will only delete the + template if the current server version matches + specified version. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + version = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/mypy.ini b/owl-bot-staging/v1/mypy.ini new file mode 100644 index 00000000..4505b485 --- /dev/null +++ b/owl-bot-staging/v1/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/owl-bot-staging/v1/noxfile.py b/owl-bot-staging/v1/noxfile.py new file mode 100644 index 00000000..5d9be515 --- /dev/null +++ b/owl-bot-staging/v1/noxfile.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
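
# A hedged end-to-end sketch of the workflow template flow defined by the
# messages above: create a parameterized template, instantiate it with
# parameter values, and iterate existing templates. The project, region,
# template id, job, and parameter values are illustrative placeholders, and
# the regional endpoint override is an assumption about typical usage.
from google.cloud import dataproc_v1

project_id = "my-project"   # placeholder
region = "us-central1"      # placeholder
parent = f"projects/{project_id}/regions/{region}"

client = dataproc_v1.WorkflowTemplateServiceClient(
    client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
)

template = dataproc_v1.WorkflowTemplate(
    id="sample-template",
    placement=dataproc_v1.WorkflowTemplatePlacement(
        managed_cluster=dataproc_v1.ManagedCluster(
            cluster_name="sample-cluster",
            config=dataproc_v1.ClusterConfig(),
        ),
    ),
    jobs=[
        dataproc_v1.OrderedJob(
            step_id="teragen",
            hadoop_job=dataproc_v1.HadoopJob(
                main_jar_file_uri=(
                    "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar"
                ),
                args=["teragen", "1000", "hdfs:///gen/"],
            ),
        ),
    ],
    parameters=[
        # Parameterize the row count (args[1] of the teragen step), using the
        # field-path syntax documented on TemplateParameter.fields.
        dataproc_v1.TemplateParameter(
            name="NUM_ROWS",
            fields=["jobs['teragen'].hadoopJob.args[1]"],
            validation=dataproc_v1.ParameterValidation(
                regex=dataproc_v1.RegexValidation(regexes=[r"\d+"]),
            ),
        ),
    ],
)

created = client.create_workflow_template(parent=parent, template=template)

# Instantiation returns a long-running operation; result() blocks until the
# workflow's DAG of jobs finishes (or the configured dag_timeout elapses).
operation = client.instantiate_workflow_template(
    name=created.name,
    parameters={"NUM_ROWS": "10000"},
)
operation.result()

# list_workflow_templates returns a pager that fetches further pages lazily.
for tmpl in client.list_workflow_templates(parent=parent):
    print(tmpl.name, tmpl.version)
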
+# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + + +nox.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds" + # exclude update_lower_bounds from default + "docs", +] + +@nox.session(python=['3.6', '3.7', '3.8', '3.9']) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/dataproc_v1/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python='3.7') +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. + """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7']) +def mypy(session): + """Run the type checker.""" + session.install('mypy', 'types-pkg_resources') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.6') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/owl-bot-staging/v1/scripts/fixup_dataproc_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_dataproc_v1_keywords.py new file mode 100644 index 00000000..a976d109 --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_dataproc_v1_keywords.py @@ -0,0 +1,202 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class dataprocCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'cancel_job': ('project_id', 'region', 'job_id', ), + 'create_autoscaling_policy': ('parent', 'policy', ), + 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', ), + 'create_workflow_template': ('parent', 'template', ), + 'delete_autoscaling_policy': ('name', ), + 'delete_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), + 'delete_job': ('project_id', 'region', 'job_id', ), + 'delete_workflow_template': ('name', 'version', ), + 'diagnose_cluster': ('project_id', 'region', 'cluster_name', ), + 'get_autoscaling_policy': ('name', ), + 'get_cluster': ('project_id', 'region', 'cluster_name', ), + 'get_job': ('project_id', 'region', 'job_id', ), + 'get_workflow_template': ('name', 'version', ), + 'instantiate_inline_workflow_template': ('parent', 'template', 'request_id', ), + 'instantiate_workflow_template': ('name', 'version', 'request_id', 'parameters', ), + 'list_autoscaling_policies': ('parent', 'page_size', 'page_token', ), + 'list_clusters': ('project_id', 'region', 'filter', 'page_size', 'page_token', ), + 'list_jobs': ('project_id', 'region', 'page_size', 'page_token', 'cluster_name', 'job_state_matcher', 'filter', ), + 'list_workflow_templates': ('parent', 'page_size', 'page_token', ), + 'start_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), + 'stop_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), + 'submit_job': ('project_id', 'region', 'job', 'request_id', ), + 'submit_job_as_operation': ('project_id', 'region', 'job', 'request_id', ), + 'update_autoscaling_policy': ('policy', ), + 'update_cluster': ('project_id', 'region', 'cluster_name', 'cluster', 'update_mask', 'graceful_decommission_timeout', 'request_id', ), + 'update_job': ('project_id', 'region', 'job_id', 'job', 'update_mask', ), + 'update_workflow_template': ('template', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=dataprocCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the dataproc client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/setup.py b/owl-bot-staging/v1/setup.py new file mode 100644 index 00000000..63f4596f --- /dev/null +++ b/owl-bot-staging/v1/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-dataproc', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1/tests/__init__.py b/owl-bot-staging/v1/tests/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
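
# A minimal sketch of what fixup_dataproc_v1_keywords.py does to a single call
# site, assuming this snippet sits next to the script so the transformer class
# can be imported. The sample call below is illustrative, not taken from the
# generated sources.
import libcst as cst

from fixup_dataproc_v1_keywords import dataprocCallTransformer

before = "client.get_cluster('my-project', 'us-central1', 'my-cluster', timeout=30)\n"
after = cst.parse_module(before).visit(dataprocCallTransformer()).code
print(after)
# Expected shape of the rewritten call: API fields move into a single
# `request` dict while control parameters stay as keywords, e.g.
#   client.get_cluster(request={'project_id': 'my-project',
#                               'region': 'us-central1',
#                               'cluster_name': 'my-cluster'}, timeout=30)
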
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/__init__.py b/owl-bot-staging/v1/tests/unit/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py new file mode 100644 index 00000000..5e2c6c0d --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py @@ -0,0 +1,2293 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1.services.autoscaling_policy_service import AutoscalingPolicyServiceAsyncClient +from google.cloud.dataproc_v1.services.autoscaling_policy_service import AutoscalingPolicyServiceClient +from google.cloud.dataproc_v1.services.autoscaling_policy_service import pagers +from google.cloud.dataproc_v1.services.autoscaling_policy_service import transports +from google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.dataproc_v1.types import autoscaling_policies +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(None) is None + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + AutoscalingPolicyServiceClient, + AutoscalingPolicyServiceAsyncClient, +]) +def test_autoscaling_policy_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + AutoscalingPolicyServiceClient, + AutoscalingPolicyServiceAsyncClient, +]) +def test_autoscaling_policy_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), + (transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_autoscaling_policy_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + AutoscalingPolicyServiceClient, + AutoscalingPolicyServiceAsyncClient, +]) +def test_autoscaling_policy_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def 
test_autoscaling_policy_service_client_get_transport_class(): + transport = AutoscalingPolicyServiceClient.get_transport_class() + available_transports = [ + transports.AutoscalingPolicyServiceGrpcTransport, + ] + assert transport in available_transports + + transport = AutoscalingPolicyServiceClient.get_transport_class("grpc") + assert transport == transports.AutoscalingPolicyServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), + (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(AutoscalingPolicyServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceClient)) +@mock.patch.object(AutoscalingPolicyServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceAsyncClient)) +def test_autoscaling_policy_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(AutoscalingPolicyServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(AutoscalingPolicyServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc", "true"), + (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc", "false"), + (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(AutoscalingPolicyServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceClient)) +@mock.patch.object(AutoscalingPolicyServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_autoscaling_policy_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), + (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_autoscaling_policy_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), + (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_autoscaling_policy_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_autoscaling_policy_service_client_client_options_from_dict(): + with mock.patch('google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = AutoscalingPolicyServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.CreateAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), + ) + response = client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +def test_create_autoscaling_policy_from_dict(): + test_create_autoscaling_policy(request_type=dict) + + +def test_create_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + client.create_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.CreateAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + )) + response = await client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_async_from_dict(): + await test_create_autoscaling_policy_async(request_type=dict) + + +def test_create_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.CreateAutoscalingPolicyRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.CreateAutoscalingPolicyRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + await client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_autoscaling_policy( + parent='parent_value', + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') + + +def test_create_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_autoscaling_policy( + autoscaling_policies.CreateAutoscalingPolicyRequest(), + parent='parent_value', + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_autoscaling_policy( + parent='parent_value', + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_autoscaling_policy( + autoscaling_policies.CreateAutoscalingPolicyRequest(), + parent='parent_value', + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + +def test_update_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.UpdateAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), + ) + response = client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +def test_update_autoscaling_policy_from_dict(): + test_update_autoscaling_policy(request_type=dict) + + +def test_update_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + client.update_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.UpdateAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + )) + response = await client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_async_from_dict(): + await test_update_autoscaling_policy_async(request_type=dict) + + +def test_update_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.UpdateAutoscalingPolicyRequest() + + request.policy.name = 'policy.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'policy.name=policy.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.UpdateAutoscalingPolicyRequest() + + request.policy.name = 'policy.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + await client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'policy.name=policy.name/value', + ) in kw['metadata'] + + +def test_update_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_autoscaling_policy( + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') + + +def test_update_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_autoscaling_policy( + autoscaling_policies.UpdateAutoscalingPolicyRequest(), + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_autoscaling_policy( + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_autoscaling_policy( + autoscaling_policies.UpdateAutoscalingPolicyRequest(), + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + +def test_get_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.GetAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), + ) + response = client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +def test_get_autoscaling_policy_from_dict(): + test_get_autoscaling_policy(request_type=dict) + + +def test_get_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + client.get_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.GetAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + )) + response = await client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_async_from_dict(): + await test_get_autoscaling_policy_async(request_type=dict) + + +def test_get_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.GetAutoscalingPolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
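+    # Note (editorial comment): GAPIC clients propagate the resource name in the
+    # x-goog-request-params metadata header so the backend can route the request;
+    # the kwargs recorded on the mocked stub call expose that metadata for the
+    # assertion below.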
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.GetAutoscalingPolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + await client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_autoscaling_policy( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_autoscaling_policy( + autoscaling_policies.GetAutoscalingPolicyRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_autoscaling_policy( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_autoscaling_policy( + autoscaling_policies.GetAutoscalingPolicyRequest(), + name='name_value', + ) + + +def test_list_autoscaling_policies(transport: str = 'grpc', request_type=autoscaling_policies.ListAutoscalingPoliciesRequest): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAutoscalingPoliciesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_autoscaling_policies_from_dict(): + test_list_autoscaling_policies(request_type=dict) + + +def test_list_autoscaling_policies_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + client.list_autoscaling_policies() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.ListAutoscalingPoliciesRequest): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAutoscalingPoliciesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async_from_dict(): + await test_list_autoscaling_policies_async(request_type=dict) + + +def test_list_autoscaling_policies_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.ListAutoscalingPoliciesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() + client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.ListAutoscalingPoliciesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse()) + await client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_autoscaling_policies_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_autoscaling_policies( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_autoscaling_policies_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_autoscaling_policies( + autoscaling_policies.ListAutoscalingPoliciesRequest(), + parent='parent_value', + ) + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_autoscaling_policies( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_autoscaling_policies( + autoscaling_policies.ListAutoscalingPoliciesRequest(), + parent='parent_value', + ) + + +def test_list_autoscaling_policies_pager(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Set the response to a series of pages. 
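+        # Note (editorial comment): each successive stub call returns the next
+        # mocked ListAutoscalingPoliciesResponse, and the trailing RuntimeError
+        # makes any unexpected extra page request fail loudly. Iterating the
+        # returned pager, e.g.
+        #     [p for p in client.list_autoscaling_policies(request={})]
+        # should therefore yield the six AutoscalingPolicy messages spread
+        # across the three non-empty pages.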
+ call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='abc', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], + next_page_token='def', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='ghi', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_autoscaling_policies(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, autoscaling_policies.AutoscalingPolicy) + for i in results) + +def test_list_autoscaling_policies_pages(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='abc', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], + next_page_token='def', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='ghi', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + pages = list(client.list_autoscaling_policies(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async_pager(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='abc', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], + next_page_token='def', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='ghi', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_autoscaling_policies(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, autoscaling_policies.AutoscalingPolicy) + for i in responses) + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async_pages(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='abc', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], + next_page_token='def', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='ghi', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_autoscaling_policies(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.DeleteAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_autoscaling_policy_from_dict(): + test_delete_autoscaling_policy(request_type=dict) + + +def test_delete_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + client.delete_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.DeleteAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_async_from_dict(): + await test_delete_autoscaling_policy_async(request_type=dict) + + +def test_delete_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.DeleteAutoscalingPolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + call.return_value = None + client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.DeleteAutoscalingPolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_autoscaling_policy( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_autoscaling_policy( + autoscaling_policies.DeleteAutoscalingPolicyRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_autoscaling_policy( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_autoscaling_policy( + autoscaling_policies.DeleteAutoscalingPolicyRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = AutoscalingPolicyServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.AutoscalingPolicyServiceGrpcTransport, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.AutoscalingPolicyServiceGrpcTransport, + ) + +def test_autoscaling_policy_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.AutoscalingPolicyServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_autoscaling_policy_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.AutoscalingPolicyServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
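+    # Note (editorial comment): the base transport only declares the five
+    # AutoscalingPolicyService RPCs; the concrete gRPC and gRPC AsyncIO
+    # transports are expected to override them with real stubs.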
+ methods = ( + 'create_autoscaling_policy', + 'update_autoscaling_policy', + 'get_autoscaling_policy', + 'list_autoscaling_policies', + 'delete_autoscaling_policy', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +@requires_google_auth_gte_1_25_0 +def test_autoscaling_policy_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoscalingPolicyServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_autoscaling_policy_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoscalingPolicyServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_autoscaling_policy_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoscalingPolicyServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_autoscaling_policy_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + AutoscalingPolicyServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_autoscaling_policy_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + AutoscalingPolicyServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AutoscalingPolicyServiceGrpcTransport, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_autoscaling_policy_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AutoscalingPolicyServiceGrpcTransport, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_autoscaling_policy_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.AutoscalingPolicyServiceGrpcTransport, grpc_helpers), + (transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_autoscaling_policy_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "dataproc.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="dataproc.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) +def test_autoscaling_policy_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
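+    # Note (editorial comment): an explicitly supplied ssl_channel_credentials
+    # object should take precedence; client_cert_source_for_mtls is only
+    # consulted (second block below) when no ready-made SSL credentials are
+    # passed in.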
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_autoscaling_policy_service_host_no_port(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), + ) + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_autoscaling_policy_service_host_with_port(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), + ) + assert client.transport._host == 'dataproc.googleapis.com:8000' + +def test_autoscaling_policy_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_autoscaling_policy_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
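# As a rough sketch of what the deprecation test below exercises (names taken
# from the surrounding tests, not a prescribed usage): the old constructor
# arguments trigger a DeprecationWarning,
#
#     transport = transports.AutoscalingPolicyServiceGrpcTransport(
#         host="squid.clam.whelk",
#         credentials=ga_credentials.AnonymousCredentials(),
#         api_mtls_endpoint="mtls.squid.clam.whelk",
#         client_cert_source=client_cert_source_callback,
#     )  # warns: api_mtls_endpoint / client_cert_source are deprecated
#
# whereas the supported spelling, tested above, passes the callback as
# client_cert_source_for_mtls or hands the transport a ready-made
# ssl_channel_credentials object.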
+@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) +def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) +def test_autoscaling_policy_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_autoscaling_policy_path(): + project = "squid" + location = "clam" + autoscaling_policy = "whelk" + expected = "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format(project=project, location=location, autoscaling_policy=autoscaling_policy, ) + actual = AutoscalingPolicyServiceClient.autoscaling_policy_path(project, location, autoscaling_policy) + assert expected == actual + + +def test_parse_autoscaling_policy_path(): + expected = { + "project": "octopus", + "location": "oyster", + "autoscaling_policy": "nudibranch", + } + path = AutoscalingPolicyServiceClient.autoscaling_policy_path(**expected) + + # Check that the 
path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_autoscaling_policy_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = AutoscalingPolicyServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = AutoscalingPolicyServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = AutoscalingPolicyServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = AutoscalingPolicyServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = AutoscalingPolicyServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = AutoscalingPolicyServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = AutoscalingPolicyServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = AutoscalingPolicyServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = AutoscalingPolicyServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = AutoscalingPolicyServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = AutoscalingPolicyServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.AutoscalingPolicyServiceTransport, '_prep_wrapped_messages') as prep: + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.AutoscalingPolicyServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = AutoscalingPolicyServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_cluster_controller.py b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_cluster_controller.py new file mode 100644 index 00000000..f0d8e2af --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_cluster_controller.py @@ -0,0 +1,2449 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1.services.cluster_controller import ClusterControllerAsyncClient +from google.cloud.dataproc_v1.services.cluster_controller import ClusterControllerClient +from google.cloud.dataproc_v1.services.cluster_controller import pagers +from google.cloud.dataproc_v1.services.cluster_controller import transports +from google.cloud.dataproc_v1.services.cluster_controller.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import operations +from google.cloud.dataproc_v1.types import shared +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the 
default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ClusterControllerClient._get_default_mtls_endpoint(None) is None + assert ClusterControllerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ClusterControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ClusterControllerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ClusterControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ClusterControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ClusterControllerClient, + ClusterControllerAsyncClient, +]) +def test_cluster_controller_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + ClusterControllerClient, + ClusterControllerAsyncClient, +]) +def test_cluster_controller_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ClusterControllerGrpcTransport, "grpc"), + (transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_controller_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + ClusterControllerClient, + ClusterControllerAsyncClient, +]) +def test_cluster_controller_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + 
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_cluster_controller_client_get_transport_class(): + transport = ClusterControllerClient.get_transport_class() + available_transports = [ + transports.ClusterControllerGrpcTransport, + ] + assert transport in available_transports + + transport = ClusterControllerClient.get_transport_class("grpc") + assert transport == transports.ClusterControllerGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(ClusterControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerClient)) +@mock.patch.object(ClusterControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerAsyncClient)) +def test_cluster_controller_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ClusterControllerClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ClusterControllerClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc", "true"), + (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc", "false"), + (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(ClusterControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerClient)) +@mock.patch.object(ClusterControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_cluster_controller_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
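# In other words, the autoswitch behavior asserted below is: when
# GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a client certificate is
# available (either via client_options.client_cert_source or an ADC default
# cert source), the client targets DEFAULT_MTLS_ENDPOINT and forwards the
# callback as client_cert_source_for_mtls; when it is "false", or no
# certificate is available, the client stays on DEFAULT_ENDPOINT with no
# client certificate. A rough caller-side sketch using names from these tests:
#
#     options = client_options.ClientOptions(
#         client_cert_source=client_cert_source_callback)
#     client = ClusterControllerClient(client_options=options)
#     # the environment variables, not the option alone, decide whether the
#     # callback and the mTLS endpoint are actually used.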
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_controller_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_controller_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_cluster_controller_client_client_options_from_dict(): + with mock.patch('google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = ClusterControllerClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_cluster(transport: str = 'grpc', request_type=clusters.CreateClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_cluster_from_dict(): + test_create_cluster(request_type=dict) + + +def test_create_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
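# (Cluster mutations are long-running operations: the mocked stub returns an
# operations_pb2.Operation and the client wraps it in an api_core future,
# which is why these tests assert isinstance(response, future.Future) rather
# than a finished Cluster. A typical caller would do, roughly:
#
#     operation = client.create_cluster(
#         project_id='...', region='...', cluster=clusters.Cluster(...))
#     result = operation.result()  # blocks until the backend finishes
# )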
+ with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + client.create_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.CreateClusterRequest() + + +@pytest.mark.asyncio +async def test_create_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.CreateClusterRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_cluster_async_from_dict(): + await test_create_cluster_async(request_type=dict) + + +def test_create_cluster_flattened(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_cluster( + project_id='project_id_value', + region='region_value', + cluster=clusters.Cluster(project_id='project_id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster == clusters.Cluster(project_id='project_id_value') + + +def test_create_cluster_flattened_error(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_cluster( + clusters.CreateClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster=clusters.Cluster(project_id='project_id_value'), + ) + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_cluster( + project_id='project_id_value', + region='region_value', + cluster=clusters.Cluster(project_id='project_id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster == clusters.Cluster(project_id='project_id_value') + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_cluster( + clusters.CreateClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster=clusters.Cluster(project_id='project_id_value'), + ) + + +def test_update_cluster(transport: str = 'grpc', request_type=clusters.UpdateClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.UpdateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_cluster_from_dict(): + test_update_cluster(request_type=dict) + + +def test_update_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + client.update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.UpdateClusterRequest() + + +@pytest.mark.asyncio +async def test_update_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.UpdateClusterRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.UpdateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_cluster_async_from_dict(): + await test_update_cluster_async(request_type=dict) + + +def test_update_cluster_flattened(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + cluster=clusters.Cluster(project_id='project_id_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + assert args[0].cluster == clusters.Cluster(project_id='project_id_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +def test_update_cluster_flattened_error(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_cluster( + clusters.UpdateClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + cluster=clusters.Cluster(project_id='project_id_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.update_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + cluster=clusters.Cluster(project_id='project_id_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + assert args[0].cluster == clusters.Cluster(project_id='project_id_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_cluster( + clusters.UpdateClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + cluster=clusters.Cluster(project_id='project_id_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_stop_cluster(transport: str = 'grpc', request_type=clusters.StopClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.stop_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.StopClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_stop_cluster_from_dict(): + test_stop_cluster(request_type=dict) + + +def test_stop_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_cluster), + '__call__') as call: + client.stop_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.StopClusterRequest() + + +@pytest.mark.asyncio +async def test_stop_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.StopClusterRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.stop_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.stop_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.StopClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_stop_cluster_async_from_dict(): + await test_stop_cluster_async(request_type=dict) + + +def test_start_cluster(transport: str = 'grpc', request_type=clusters.StartClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.start_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.StartClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_start_cluster_from_dict(): + test_start_cluster(request_type=dict) + + +def test_start_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_cluster), + '__call__') as call: + client.start_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.StartClusterRequest() + + +@pytest.mark.asyncio +async def test_start_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.StartClusterRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.start_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.StartClusterRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_start_cluster_async_from_dict(): + await test_start_cluster_async(request_type=dict) + + +def test_delete_cluster(transport: str = 'grpc', request_type=clusters.DeleteClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DeleteClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_cluster_from_dict(): + test_delete_cluster(request_type=dict) + + +def test_delete_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + client.delete_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DeleteClusterRequest() + + +@pytest.mark.asyncio +async def test_delete_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.DeleteClusterRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DeleteClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_cluster_async_from_dict(): + await test_delete_cluster_async(request_type=dict) + + +def test_delete_cluster_flattened(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + + +def test_delete_cluster_flattened_error(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + clusters.DeleteClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_cluster( + clusters.DeleteClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + +def test_get_cluster(transport: str = 'grpc', request_type=clusters.GetClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.Cluster( + project_id='project_id_value', + cluster_name='cluster_name_value', + cluster_uuid='cluster_uuid_value', + ) + response = client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.GetClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, clusters.Cluster) + assert response.project_id == 'project_id_value' + assert response.cluster_name == 'cluster_name_value' + assert response.cluster_uuid == 'cluster_uuid_value' + + +def test_get_cluster_from_dict(): + test_get_cluster(request_type=dict) + + +def test_get_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + client.get_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.GetClusterRequest() + + +@pytest.mark.asyncio +async def test_get_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.GetClusterRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(clusters.Cluster( + project_id='project_id_value', + cluster_name='cluster_name_value', + cluster_uuid='cluster_uuid_value', + )) + response = await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.GetClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, clusters.Cluster) + assert response.project_id == 'project_id_value' + assert response.cluster_name == 'cluster_name_value' + assert response.cluster_uuid == 'cluster_uuid_value' + + +@pytest.mark.asyncio +async def test_get_cluster_async_from_dict(): + await test_get_cluster_async(request_type=dict) + + +def test_get_cluster_flattened(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.Cluster() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + + +def test_get_cluster_flattened_error(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_cluster( + clusters.GetClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.Cluster() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.Cluster()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_cluster( + clusters.GetClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + +def test_list_clusters(transport: str = 'grpc', request_type=clusters.ListClustersRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.ListClustersResponse( + next_page_token='next_page_token_value', + ) + response = client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.ListClustersRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_clusters_from_dict(): + test_list_clusters(request_type=dict) + + +def test_list_clusters_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + client.list_clusters() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.ListClustersRequest() + + +@pytest.mark.asyncio +async def test_list_clusters_async(transport: str = 'grpc_asyncio', request_type=clusters.ListClustersRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(clusters.ListClustersResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.ListClustersRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_clusters_async_from_dict(): + await test_list_clusters_async(request_type=dict) + + +def test_list_clusters_flattened(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.ListClustersResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters( + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].filter == 'filter_value' + + +def test_list_clusters_flattened_error(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + clusters.ListClustersRequest(), + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = clusters.ListClustersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.ListClustersResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_clusters( + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].filter == 'filter_value' + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_clusters( + clusters.ListClustersRequest(), + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + +def test_list_clusters_pager(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + clusters.Cluster(), + ], + next_page_token='abc', + ), + clusters.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + ], + next_page_token='ghi', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + ], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_clusters(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, clusters.Cluster) + for i in results) + +def test_list_clusters_pages(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + clusters.Cluster(), + ], + next_page_token='abc', + ), + clusters.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + ], + next_page_token='ghi', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + ], + ), + RuntimeError, + ) + pages = list(client.list_clusters(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_clusters_async_pager(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + clusters.Cluster(), + ], + next_page_token='abc', + ), + clusters.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + ], + next_page_token='ghi', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_clusters(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, clusters.Cluster) + for i in responses) + +@pytest.mark.asyncio +async def test_list_clusters_async_pages(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + clusters.Cluster(), + ], + next_page_token='abc', + ), + clusters.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + ], + next_page_token='ghi', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_clusters(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_diagnose_cluster(transport: str = 'grpc', request_type=clusters.DiagnoseClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.diagnose_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.diagnose_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DiagnoseClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_diagnose_cluster_from_dict(): + test_diagnose_cluster(request_type=dict) + + +def test_diagnose_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.diagnose_cluster), + '__call__') as call: + client.diagnose_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DiagnoseClusterRequest() + + +@pytest.mark.asyncio +async def test_diagnose_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.DiagnoseClusterRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.diagnose_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.diagnose_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DiagnoseClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_diagnose_cluster_async_from_dict(): + await test_diagnose_cluster_async(request_type=dict) + + +def test_diagnose_cluster_flattened(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.diagnose_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.diagnose_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + + +def test_diagnose_cluster_flattened_error(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.diagnose_cluster( + clusters.DiagnoseClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + +@pytest.mark.asyncio +async def test_diagnose_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.diagnose_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.diagnose_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + + +@pytest.mark.asyncio +async def test_diagnose_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.diagnose_cluster( + clusters.DiagnoseClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ClusterControllerClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ClusterControllerGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.ClusterControllerGrpcTransport, + transports.ClusterControllerGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ClusterControllerGrpcTransport, + ) + +def test_cluster_controller_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ClusterControllerTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_cluster_controller_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ClusterControllerTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'create_cluster', + 'update_cluster', + 'stop_cluster', + 'start_cluster', + 'delete_cluster', + 'get_cluster', + 'list_clusters', + 'diagnose_cluster', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_cluster_controller_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterControllerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_cluster_controller_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterControllerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_cluster_controller_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterControllerTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_cluster_controller_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ClusterControllerClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_cluster_controller_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ClusterControllerClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterControllerGrpcTransport, + transports.ClusterControllerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_cluster_controller_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterControllerGrpcTransport, + transports.ClusterControllerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_cluster_controller_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ClusterControllerGrpcTransport, grpc_helpers), + (transports.ClusterControllerGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_cluster_controller_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "dataproc.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="dataproc.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport]) +def test_cluster_controller_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_cluster_controller_host_no_port(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), + ) + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_cluster_controller_host_with_port(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), + ) + assert client.transport._host == 'dataproc.googleapis.com:8000' + +def test_cluster_controller_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ClusterControllerGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_cluster_controller_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.ClusterControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport]) +def test_cluster_controller_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport]) +def test_cluster_controller_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_cluster_controller_grpc_lro_client(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_cluster_controller_grpc_lro_async_client(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_cluster_path(): + project = "squid" + location = "clam" + cluster = "whelk" + expected = "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) + actual = ClusterControllerClient.cluster_path(project, location, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "octopus", + "location": "oyster", + "cluster": "nudibranch", + } + path = ClusterControllerClient.cluster_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_cluster_path(path) + assert expected == actual + +def test_service_path(): + project = "cuttlefish" + location = "mussel" + service = "winkle" + expected = "projects/{project}/locations/{location}/services/{service}".format(project=project, location=location, service=service, ) + actual = ClusterControllerClient.service_path(project, location, service) + assert expected == actual + + +def test_parse_service_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "service": "abalone", + } + path = ClusterControllerClient.service_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_service_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ClusterControllerClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ClusterControllerClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ClusterControllerClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ClusterControllerClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ClusterControllerClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ClusterControllerClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ClusterControllerClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ClusterControllerClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ClusterControllerClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ClusterControllerClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ClusterControllerClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ClusterControllerTransport, '_prep_wrapped_messages') as prep: + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ClusterControllerTransport, '_prep_wrapped_messages') as prep: + transport_class = ClusterControllerClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_job_controller.py b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_job_controller.py new file mode 100644 index 00000000..245fde1e --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_job_controller.py @@ -0,0 +1,2355 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1.services.job_controller import JobControllerAsyncClient +from google.cloud.dataproc_v1.services.job_controller import JobControllerClient +from google.cloud.dataproc_v1.services.job_controller import pagers +from google.cloud.dataproc_v1.services.job_controller import transports +from google.cloud.dataproc_v1.services.job_controller.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.dataproc_v1.types import jobs +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert JobControllerClient._get_default_mtls_endpoint(None) is None + assert JobControllerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert JobControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert JobControllerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert JobControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert JobControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + JobControllerClient, + JobControllerAsyncClient, +]) +def test_job_controller_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + JobControllerClient, + JobControllerAsyncClient, +]) +def test_job_controller_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.JobControllerGrpcTransport, "grpc"), + (transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_controller_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + JobControllerClient, + JobControllerAsyncClient, +]) +def test_job_controller_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_job_controller_client_get_transport_class(): + transport = JobControllerClient.get_transport_class() + available_transports = [ + transports.JobControllerGrpcTransport, + ] + assert transport in available_transports + + transport = 
JobControllerClient.get_transport_class("grpc") + assert transport == transports.JobControllerGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(JobControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerClient)) +@mock.patch.object(JobControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerAsyncClient)) +def test_job_controller_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(JobControllerClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(JobControllerClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc", "true"), + (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc", "false"), + (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(JobControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerClient)) +@mock.patch.object(JobControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_job_controller_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_controller_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_controller_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_job_controller_client_client_options_from_dict(): + with mock.patch('google.cloud.dataproc_v1.services.job_controller.transports.JobControllerGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = JobControllerClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_submit_job(transport: str = 'grpc', request_type=jobs.SubmitJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), + ) + response = client.submit_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +def test_submit_job_from_dict(): + test_submit_job(request_type=dict) + + +def test_submit_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job), + '__call__') as call: + client.submit_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + +@pytest.mark.asyncio +async def test_submit_job_async(transport: str = 'grpc_asyncio', request_type=jobs.SubmitJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + )) + response = await client.submit_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +@pytest.mark.asyncio +async def test_submit_job_async_from_dict(): + await test_submit_job_async(request_type=dict) + + +def test_submit_job_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.submit_job( + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) + + +def test_submit_job_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.submit_job( + jobs.SubmitJobRequest(), + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + +@pytest.mark.asyncio +async def test_submit_job_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.submit_job( + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) + + +@pytest.mark.asyncio +async def test_submit_job_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.submit_job( + jobs.SubmitJobRequest(), + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + +def test_submit_job_as_operation(transport: str = 'grpc', request_type=jobs.SubmitJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.submit_job_as_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_submit_job_as_operation_from_dict(): + test_submit_job_as_operation(request_type=dict) + + +def test_submit_job_as_operation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), + '__call__') as call: + client.submit_job_as_operation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_async(transport: str = 'grpc_asyncio', request_type=jobs.SubmitJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.submit_job_as_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_async_from_dict(): + await test_submit_job_as_operation_async(request_type=dict) + + +def test_submit_job_as_operation_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.submit_job_as_operation( + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) + + +def test_submit_job_as_operation_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.submit_job_as_operation( + jobs.SubmitJobRequest(), + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.submit_job_as_operation( + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.submit_job_as_operation( + jobs.SubmitJobRequest(), + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + +def test_get_job(transport: str = 'grpc', request_type=jobs.GetJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), + ) + response = client.get_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.GetJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +def test_get_job_from_dict(): + test_get_job(request_type=dict) + + +def test_get_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_job), + '__call__') as call: + client.get_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.GetJobRequest() + + +@pytest.mark.asyncio +async def test_get_job_async(transport: str = 'grpc_asyncio', request_type=jobs.GetJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + )) + response = await client.get_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.GetJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, jobs.Job) + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +@pytest.mark.asyncio +async def test_get_job_async_from_dict(): + await test_get_job_async(request_type=dict) + + +def test_get_job_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +def test_get_job_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_job( + jobs.GetJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +@pytest.mark.asyncio +async def test_get_job_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +@pytest.mark.asyncio +async def test_get_job_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_job( + jobs.GetJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +def test_list_jobs(transport: str = 'grpc', request_type=jobs.ListJobsRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.ListJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.ListJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_jobs_from_dict(): + test_list_jobs(request_type=dict) + + +def test_list_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + client.list_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.ListJobsRequest() + + +@pytest.mark.asyncio +async def test_list_jobs_async(transport: str = 'grpc_asyncio', request_type=jobs.ListJobsRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.ListJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.ListJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_jobs_async_from_dict(): + await test_list_jobs_async(request_type=dict) + + +def test_list_jobs_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.ListJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_jobs( + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].filter == 'filter_value' + + +def test_list_jobs_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_jobs( + jobs.ListJobsRequest(), + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + +@pytest.mark.asyncio +async def test_list_jobs_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.ListJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.ListJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_jobs( + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].filter == 'filter_value' + + +@pytest.mark.asyncio +async def test_list_jobs_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_jobs( + jobs.ListJobsRequest(), + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + +def test_list_jobs_pager(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + jobs.Job(), + ], + next_page_token='abc', + ), + jobs.ListJobsResponse( + jobs=[], + next_page_token='def', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + ], + next_page_token='ghi', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + ], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_jobs(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, jobs.Job) + for i in results) + +def test_list_jobs_pages(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + jobs.Job(), + ], + next_page_token='abc', + ), + jobs.ListJobsResponse( + jobs=[], + next_page_token='def', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + ], + next_page_token='ghi', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + ], + ), + RuntimeError, + ) + pages = list(client.list_jobs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_jobs_async_pager(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + jobs.Job(), + ], + next_page_token='abc', + ), + jobs.ListJobsResponse( + jobs=[], + next_page_token='def', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + ], + next_page_token='ghi', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_jobs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, jobs.Job) + for i in responses) + +@pytest.mark.asyncio +async def test_list_jobs_async_pages(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + jobs.Job(), + ], + next_page_token='abc', + ), + jobs.ListJobsResponse( + jobs=[], + next_page_token='def', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + ], + next_page_token='ghi', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_jobs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_update_job(transport: str = 'grpc', request_type=jobs.UpdateJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), + ) + response = client.update_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.UpdateJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +def test_update_job_from_dict(): + test_update_job(request_type=dict) + + +def test_update_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_job), + '__call__') as call: + client.update_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.UpdateJobRequest() + + +@pytest.mark.asyncio +async def test_update_job_async(transport: str = 'grpc_asyncio', request_type=jobs.UpdateJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + )) + response = await client.update_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.UpdateJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +@pytest.mark.asyncio +async def test_update_job_async_from_dict(): + await test_update_job_async(request_type=dict) + + +def test_cancel_job(transport: str = 'grpc', request_type=jobs.CancelJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = jobs.Job( + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), + ) + response = client.cancel_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.CancelJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +def test_cancel_job_from_dict(): + test_cancel_job(request_type=dict) + + +def test_cancel_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_job), + '__call__') as call: + client.cancel_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.CancelJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_job_async(transport: str = 'grpc_asyncio', request_type=jobs.CancelJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + )) + response = await client.cancel_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.CancelJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +@pytest.mark.asyncio +async def test_cancel_job_async_from_dict(): + await test_cancel_job_async(request_type=dict) + + +def test_cancel_job_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.cancel_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +def test_cancel_job_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_job( + jobs.CancelJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +@pytest.mark.asyncio +async def test_cancel_job_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +@pytest.mark.asyncio +async def test_cancel_job_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_job( + jobs.CancelJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +def test_delete_job(transport: str = 'grpc', request_type=jobs.DeleteJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.DeleteJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_job_from_dict(): + test_delete_job(request_type=dict) + + +def test_delete_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_job), + '__call__') as call: + client.delete_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.DeleteJobRequest() + + +@pytest.mark.asyncio +async def test_delete_job_async(transport: str = 'grpc_asyncio', request_type=jobs.DeleteJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.DeleteJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_job_async_from_dict(): + await test_delete_job_async(request_type=dict) + + +def test_delete_job_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +def test_delete_job_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_job( + jobs.DeleteJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +@pytest.mark.asyncio +async def test_delete_job_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.delete_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +@pytest.mark.asyncio +async def test_delete_job_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_job( + jobs.DeleteJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = JobControllerClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.JobControllerGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.JobControllerGrpcTransport, + transports.JobControllerGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.JobControllerGrpcTransport, + ) + +def test_job_controller_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.JobControllerTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_job_controller_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.dataproc_v1.services.job_controller.transports.JobControllerTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.JobControllerTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'submit_job', + 'submit_job_as_operation', + 'get_job', + 'list_jobs', + 'update_job', + 'cancel_job', + 'delete_job', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_job_controller_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobControllerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_job_controller_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobControllerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_job_controller_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobControllerTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_job_controller_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + JobControllerClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_job_controller_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + JobControllerClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.JobControllerGrpcTransport, + transports.JobControllerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_job_controller_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.JobControllerGrpcTransport, + transports.JobControllerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_job_controller_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.JobControllerGrpcTransport, grpc_helpers), + (transports.JobControllerGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_job_controller_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "dataproc.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+),
+            scopes=["1", "2"],
+            default_host="dataproc.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport])
+def test_job_controller_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_job_controller_host_no_port():
+    client = JobControllerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'),
+    )
+    assert client.transport._host == 'dataproc.googleapis.com:443'
+
+
+def test_job_controller_host_with_port():
+    client = JobControllerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'dataproc.googleapis.com:8000'
+
+def test_job_controller_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.JobControllerGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_job_controller_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.JobControllerGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport])
+def test_job_controller_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport])
+def test_job_controller_transport_channel_mtls_with_adc(
+    transport_class
+):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_job_controller_grpc_lro_client():
+    client = JobControllerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_job_controller_grpc_lro_async_client(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = JobControllerClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = JobControllerClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = JobControllerClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = JobControllerClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = JobControllerClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = JobControllerClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = JobControllerClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = JobControllerClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = JobControllerClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = JobControllerClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = JobControllerClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = JobControllerClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = JobControllerClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = JobControllerClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobControllerClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.JobControllerTransport, '_prep_wrapped_messages') as prep: + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.JobControllerTransport, '_prep_wrapped_messages') as prep: + transport_class = JobControllerClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py new file mode 100644 index 00000000..cb8075d0 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py @@ -0,0 +1,2863 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1.services.workflow_template_service import WorkflowTemplateServiceAsyncClient +from google.cloud.dataproc_v1.services.workflow_template_service import WorkflowTemplateServiceClient +from google.cloud.dataproc_v1.services.workflow_template_service import pagers +from google.cloud.dataproc_v1.services.workflow_template_service import transports +from google.cloud.dataproc_v1.services.workflow_template_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import jobs +from google.cloud.dataproc_v1.types import shared +from google.cloud.dataproc_v1.types import workflow_templates +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests 
the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(None) is None + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + WorkflowTemplateServiceClient, + WorkflowTemplateServiceAsyncClient, +]) +def test_workflow_template_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + WorkflowTemplateServiceClient, + WorkflowTemplateServiceAsyncClient, +]) +def test_workflow_template_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.WorkflowTemplateServiceGrpcTransport, "grpc"), + (transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_workflow_template_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + WorkflowTemplateServiceClient, + WorkflowTemplateServiceAsyncClient, +]) +def 
test_workflow_template_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_workflow_template_service_client_get_transport_class(): + transport = WorkflowTemplateServiceClient.get_transport_class() + available_transports = [ + transports.WorkflowTemplateServiceGrpcTransport, + ] + assert transport in available_transports + + transport = WorkflowTemplateServiceClient.get_transport_class("grpc") + assert transport == transports.WorkflowTemplateServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), + (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(WorkflowTemplateServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceClient)) +@mock.patch.object(WorkflowTemplateServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceAsyncClient)) +def test_workflow_template_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(WorkflowTemplateServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(WorkflowTemplateServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc", "true"), + (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc", "false"), + (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(WorkflowTemplateServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceClient)) +@mock.patch.object(WorkflowTemplateServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_workflow_template_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), + (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_workflow_template_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), + (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_workflow_template_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_workflow_template_service_client_client_options_from_dict(): + with mock.patch('google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = WorkflowTemplateServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_workflow_template(transport: str = 'grpc', request_type=workflow_templates.CreateWorkflowTemplateRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + ) + response = client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +def test_create_workflow_template_from_dict(): + test_create_workflow_template(request_type=dict) + + +def test_create_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + client.create_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_create_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.CreateWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + )) + response = await client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +@pytest.mark.asyncio +async def test_create_workflow_template_async_from_dict(): + await test_create_workflow_template_async(request_type=dict) + + +def test_create_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.CreateWorkflowTemplateRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + call.return_value = workflow_templates.WorkflowTemplate() + client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.CreateWorkflowTemplateRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + await client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_workflow_template( + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +def test_create_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_workflow_template( + workflow_templates.CreateWorkflowTemplateRequest(), + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +@pytest.mark.asyncio +async def test_create_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_workflow_template( + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +@pytest.mark.asyncio +async def test_create_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_workflow_template( + workflow_templates.CreateWorkflowTemplateRequest(), + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +def test_get_workflow_template(transport: str = 'grpc', request_type=workflow_templates.GetWorkflowTemplateRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + ) + response = client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.GetWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +def test_get_workflow_template_from_dict(): + test_get_workflow_template(request_type=dict) + + +def test_get_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + client.get_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.GetWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_get_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.GetWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + )) + response = await client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.GetWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +@pytest.mark.asyncio +async def test_get_workflow_template_async_from_dict(): + await test_get_workflow_template_async(request_type=dict) + + +def test_get_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.GetWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + call.return_value = workflow_templates.WorkflowTemplate() + client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.GetWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + await client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_workflow_template( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_workflow_template( + workflow_templates.GetWorkflowTemplateRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_get_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_workflow_template( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_workflow_template( + workflow_templates.GetWorkflowTemplateRequest(), + name='name_value', + ) + + +def test_instantiate_workflow_template(transport: str = 'grpc', request_type=workflow_templates.InstantiateWorkflowTemplateRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_workflow_template_from_dict(): + test_instantiate_workflow_template(request_type=dict) + + +def test_instantiate_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + client.instantiate_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.InstantiateWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_async_from_dict(): + await test_instantiate_workflow_template_async(request_type=dict) + + +def test_instantiate_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_instantiate_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.instantiate_workflow_template( + name='name_value', + parameters={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + assert args[0].parameters == {'key_value': 'value_value'} + + +def test_instantiate_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.instantiate_workflow_template( + workflow_templates.InstantiateWorkflowTemplateRequest(), + name='name_value', + parameters={'key_value': 'value_value'}, + ) + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.instantiate_workflow_template( + name='name_value', + parameters={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + assert args[0].parameters == {'key_value': 'value_value'} + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.instantiate_workflow_template( + workflow_templates.InstantiateWorkflowTemplateRequest(), + name='name_value', + parameters={'key_value': 'value_value'}, + ) + + +def test_instantiate_inline_workflow_template(transport: str = 'grpc', request_type=workflow_templates.InstantiateInlineWorkflowTemplateRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_inline_workflow_template_from_dict(): + test_instantiate_inline_workflow_template(request_type=dict) + + +def test_instantiate_inline_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + client.instantiate_inline_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.InstantiateInlineWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. 
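+    # The stub returned a raw Operation message; the async client wraps it in a
+    # long-running operation future, which is what this check verifies.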
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_async_from_dict(): + await test_instantiate_inline_workflow_template_async(request_type=dict) + + +def test_instantiate_inline_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_instantiate_inline_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.instantiate_inline_workflow_template( + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +def test_instantiate_inline_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.instantiate_inline_workflow_template( + workflow_templates.InstantiateInlineWorkflowTemplateRequest(), + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.instantiate_inline_workflow_template( + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.instantiate_inline_workflow_template( + workflow_templates.InstantiateInlineWorkflowTemplateRequest(), + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +def test_update_workflow_template(transport: str = 'grpc', request_type=workflow_templates.UpdateWorkflowTemplateRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + ) + response = client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +def test_update_workflow_template_from_dict(): + test_update_workflow_template(request_type=dict) + + +def test_update_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + client.update_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_update_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.UpdateWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + )) + response = await client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +@pytest.mark.asyncio +async def test_update_workflow_template_async_from_dict(): + await test_update_workflow_template_async(request_type=dict) + + +def test_update_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.UpdateWorkflowTemplateRequest() + + request.template.name = 'template.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + call.return_value = workflow_templates.WorkflowTemplate() + client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'template.name=template.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.UpdateWorkflowTemplateRequest() + + request.template.name = 'template.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + await client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'template.name=template.name/value', + ) in kw['metadata'] + + +def test_update_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_workflow_template( + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +def test_update_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_workflow_template( + workflow_templates.UpdateWorkflowTemplateRequest(), + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +@pytest.mark.asyncio +async def test_update_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_workflow_template( + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +@pytest.mark.asyncio +async def test_update_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_workflow_template( + workflow_templates.UpdateWorkflowTemplateRequest(), + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +def test_list_workflow_templates(transport: str = 'grpc', request_type=workflow_templates.ListWorkflowTemplatesRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.ListWorkflowTemplatesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListWorkflowTemplatesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_workflow_templates_from_dict(): + test_list_workflow_templates(request_type=dict) + + +def test_list_workflow_templates_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + client.list_workflow_templates() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() + + +@pytest.mark.asyncio +async def test_list_workflow_templates_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.ListWorkflowTemplatesRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListWorkflowTemplatesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_workflow_templates_async_from_dict(): + await test_list_workflow_templates_async(request_type=dict) + + +def test_list_workflow_templates_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.ListWorkflowTemplatesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + call.return_value = workflow_templates.ListWorkflowTemplatesResponse() + client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_workflow_templates_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.ListWorkflowTemplatesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse()) + await client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_workflow_templates_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.ListWorkflowTemplatesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_workflow_templates( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_workflow_templates_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_workflow_templates( + workflow_templates.ListWorkflowTemplatesRequest(), + parent='parent_value', + ) + + +@pytest.mark.asyncio +async def test_list_workflow_templates_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.ListWorkflowTemplatesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_workflow_templates( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +@pytest.mark.asyncio +async def test_list_workflow_templates_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_workflow_templates( + workflow_templates.ListWorkflowTemplatesRequest(), + parent='parent_value', + ) + + +def test_list_workflow_templates_pager(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token='abc', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], + next_page_token='def', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + ], + next_page_token='ghi', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_workflow_templates(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, workflow_templates.WorkflowTemplate) + for i in results) + +def test_list_workflow_templates_pages(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token='abc', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], + next_page_token='def', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + ], + next_page_token='ghi', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + pages = list(client.list_workflow_templates(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_workflow_templates_async_pager(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token='abc', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], + next_page_token='def', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + ], + next_page_token='ghi', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_workflow_templates(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, workflow_templates.WorkflowTemplate) + for i in responses) + +@pytest.mark.asyncio +async def test_list_workflow_templates_async_pages(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token='abc', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], + next_page_token='def', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + ], + next_page_token='ghi', + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_workflow_templates(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_workflow_template(transport: str = 'grpc', request_type=workflow_templates.DeleteWorkflowTemplateRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_workflow_template_from_dict(): + test_delete_workflow_template(request_type=dict) + + +def test_delete_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + client.delete_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_delete_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.DeleteWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_workflow_template_async_from_dict(): + await test_delete_workflow_template_async(request_type=dict) + + +def test_delete_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.DeleteWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + call.return_value = None + client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.DeleteWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_workflow_template( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_workflow_template( + workflow_templates.DeleteWorkflowTemplateRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_workflow_template( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_delete_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_workflow_template( + workflow_templates.DeleteWorkflowTemplateRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = WorkflowTemplateServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.WorkflowTemplateServiceGrpcTransport, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.WorkflowTemplateServiceGrpcTransport, + ) + +def test_workflow_template_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.WorkflowTemplateServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_workflow_template_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.WorkflowTemplateServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_workflow_template', + 'get_workflow_template', + 'instantiate_workflow_template', + 'instantiate_inline_workflow_template', + 'update_workflow_template', + 'list_workflow_templates', + 'delete_workflow_template', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_workflow_template_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.WorkflowTemplateServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_workflow_template_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.WorkflowTemplateServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_workflow_template_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.WorkflowTemplateServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_workflow_template_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + WorkflowTemplateServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_workflow_template_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + WorkflowTemplateServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.WorkflowTemplateServiceGrpcTransport, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_workflow_template_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.WorkflowTemplateServiceGrpcTransport, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_workflow_template_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.WorkflowTemplateServiceGrpcTransport, grpc_helpers), + (transports.WorkflowTemplateServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_workflow_template_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "dataproc.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="dataproc.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) +def test_workflow_template_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_workflow_template_service_host_no_port(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), + ) + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_workflow_template_service_host_with_port(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), + ) + assert client.transport._host == 'dataproc.googleapis.com:8000' + +def test_workflow_template_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.WorkflowTemplateServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_workflow_template_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) +def test_workflow_template_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) +def test_workflow_template_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_workflow_template_service_grpc_lro_client(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_workflow_template_service_grpc_lro_async_client(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_cluster_path(): + project = "squid" + location = "clam" + cluster = "whelk" + expected = "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) + actual = WorkflowTemplateServiceClient.cluster_path(project, location, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "octopus", + "location": "oyster", + "cluster": "nudibranch", + } + path = WorkflowTemplateServiceClient.cluster_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_cluster_path(path) + assert expected == actual + +def test_service_path(): + project = "cuttlefish" + location = "mussel" + service = "winkle" + expected = "projects/{project}/locations/{location}/services/{service}".format(project=project, location=location, service=service, ) + actual = WorkflowTemplateServiceClient.service_path(project, location, service) + assert expected == actual + + +def test_parse_service_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "service": "abalone", + } + path = WorkflowTemplateServiceClient.service_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_service_path(path) + assert expected == actual + +def test_workflow_template_path(): + project = "squid" + region = "clam" + workflow_template = "whelk" + expected = "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format(project=project, region=region, workflow_template=workflow_template, ) + actual = WorkflowTemplateServiceClient.workflow_template_path(project, region, workflow_template) + assert expected == actual + + +def test_parse_workflow_template_path(): + expected = { + "project": "octopus", + "region": "oyster", + "workflow_template": "nudibranch", + } + path = WorkflowTemplateServiceClient.workflow_template_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_workflow_template_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = WorkflowTemplateServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = WorkflowTemplateServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = WorkflowTemplateServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = WorkflowTemplateServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = WorkflowTemplateServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = WorkflowTemplateServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = WorkflowTemplateServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = WorkflowTemplateServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = WorkflowTemplateServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = WorkflowTemplateServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = WorkflowTemplateServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.WorkflowTemplateServiceTransport, '_prep_wrapped_messages') as prep: + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.WorkflowTemplateServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = WorkflowTemplateServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta2/.coveragerc b/owl-bot-staging/v1beta2/.coveragerc new file mode 100644 index 00000000..240638d1 --- /dev/null +++ b/owl-bot-staging/v1beta2/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/dataproc/__init__.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ + # Ignore pkg_resources exceptions. 
+    # This is added at the module level as a safeguard for if someone
+    # generates the code and tries to run it without pip installing. This
+    # makes it virtually impossible to test properly.
+    except pkg_resources.DistributionNotFound
diff --git a/owl-bot-staging/v1beta2/MANIFEST.in b/owl-bot-staging/v1beta2/MANIFEST.in
new file mode 100644
index 00000000..450e5822
--- /dev/null
+++ b/owl-bot-staging/v1beta2/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include google/cloud/dataproc *.py
+recursive-include google/cloud/dataproc_v1beta2 *.py
diff --git a/owl-bot-staging/v1beta2/README.rst b/owl-bot-staging/v1beta2/README.rst
new file mode 100644
index 00000000..b751dfd9
--- /dev/null
+++ b/owl-bot-staging/v1beta2/README.rst
@@ -0,0 +1,49 @@
+Python Client for Google Cloud Dataproc API
+=================================================
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. Enable the Google Cloud Dataproc API.
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    <your-env>\Scripts\activate
+    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/owl-bot-staging/v1beta2/docs/conf.py b/owl-bot-staging/v1beta2/docs/conf.py
new file mode 100644
index 00000000..02417582
--- /dev/null
+++ b/owl-bot-staging/v1beta2/docs/conf.py
@@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# google-cloud-dataproc documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here.
If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "1.6.3" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGLEOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = "index" + +# General information about the project. +project = u"google-cloud-dataproc" +copyright = u"2020, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. 
+todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. 
+# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-dataproc-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-dataproc.tex", + u"google-cloud-dataproc Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-cloud-dataproc", + u"Google Cloud Dataproc Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-dataproc", + u"google-cloud-dataproc Documentation", + author, + "google-cloud-dataproc", + "GAPIC library for Google Cloud Dataproc API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. 
+# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/autoscaling_policy_service.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/autoscaling_policy_service.rst new file mode 100644 index 00000000..cc81bb57 --- /dev/null +++ b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/autoscaling_policy_service.rst @@ -0,0 +1,10 @@ +AutoscalingPolicyService +------------------------------------------ + +.. automodule:: google.cloud.dataproc_v1beta2.services.autoscaling_policy_service + :members: + :inherited-members: + +.. automodule:: google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/cluster_controller.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/cluster_controller.rst new file mode 100644 index 00000000..3e375a37 --- /dev/null +++ b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/cluster_controller.rst @@ -0,0 +1,10 @@ +ClusterController +----------------------------------- + +.. automodule:: google.cloud.dataproc_v1beta2.services.cluster_controller + :members: + :inherited-members: + +.. automodule:: google.cloud.dataproc_v1beta2.services.cluster_controller.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/job_controller.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/job_controller.rst new file mode 100644 index 00000000..8ca76058 --- /dev/null +++ b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/job_controller.rst @@ -0,0 +1,10 @@ +JobController +------------------------------- + +.. automodule:: google.cloud.dataproc_v1beta2.services.job_controller + :members: + :inherited-members: + +.. automodule:: google.cloud.dataproc_v1beta2.services.job_controller.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/services.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/services.rst new file mode 100644 index 00000000..23c2d640 --- /dev/null +++ b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/services.rst @@ -0,0 +1,9 @@ +Services for Google Cloud Dataproc v1beta2 API +============================================== +.. 
toctree:: + :maxdepth: 2 + + autoscaling_policy_service + cluster_controller + job_controller + workflow_template_service diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/types.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/types.rst new file mode 100644 index 00000000..1358e4c1 --- /dev/null +++ b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Dataproc v1beta2 API +=========================================== + +.. automodule:: google.cloud.dataproc_v1beta2.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/workflow_template_service.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/workflow_template_service.rst new file mode 100644 index 00000000..d93e941b --- /dev/null +++ b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/workflow_template_service.rst @@ -0,0 +1,10 @@ +WorkflowTemplateService +----------------------------------------- + +.. automodule:: google.cloud.dataproc_v1beta2.services.workflow_template_service + :members: + :inherited-members: + +.. automodule:: google.cloud.dataproc_v1beta2.services.workflow_template_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta2/docs/index.rst b/owl-bot-staging/v1beta2/docs/index.rst new file mode 100644 index 00000000..53ec37c5 --- /dev/null +++ b/owl-bot-staging/v1beta2/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + dataproc_v1beta2/services + dataproc_v1beta2/types diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc/__init__.py new file mode 100644 index 00000000..0e182517 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc/__init__.py @@ -0,0 +1,205 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.client import AutoscalingPolicyServiceClient +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.async_client import AutoscalingPolicyServiceAsyncClient +from google.cloud.dataproc_v1beta2.services.cluster_controller.client import ClusterControllerClient +from google.cloud.dataproc_v1beta2.services.cluster_controller.async_client import ClusterControllerAsyncClient +from google.cloud.dataproc_v1beta2.services.job_controller.client import JobControllerClient +from google.cloud.dataproc_v1beta2.services.job_controller.async_client import JobControllerAsyncClient +from google.cloud.dataproc_v1beta2.services.workflow_template_service.client import WorkflowTemplateServiceClient +from google.cloud.dataproc_v1beta2.services.workflow_template_service.async_client import WorkflowTemplateServiceAsyncClient + +from google.cloud.dataproc_v1beta2.types.autoscaling_policies import AutoscalingPolicy +from google.cloud.dataproc_v1beta2.types.autoscaling_policies import BasicAutoscalingAlgorithm +from google.cloud.dataproc_v1beta2.types.autoscaling_policies import BasicYarnAutoscalingConfig +from google.cloud.dataproc_v1beta2.types.autoscaling_policies import CreateAutoscalingPolicyRequest +from google.cloud.dataproc_v1beta2.types.autoscaling_policies import DeleteAutoscalingPolicyRequest +from google.cloud.dataproc_v1beta2.types.autoscaling_policies import GetAutoscalingPolicyRequest +from google.cloud.dataproc_v1beta2.types.autoscaling_policies import InstanceGroupAutoscalingPolicyConfig +from google.cloud.dataproc_v1beta2.types.autoscaling_policies import ListAutoscalingPoliciesRequest +from google.cloud.dataproc_v1beta2.types.autoscaling_policies import ListAutoscalingPoliciesResponse +from google.cloud.dataproc_v1beta2.types.autoscaling_policies import UpdateAutoscalingPolicyRequest +from google.cloud.dataproc_v1beta2.types.clusters import AcceleratorConfig +from google.cloud.dataproc_v1beta2.types.clusters import AutoscalingConfig +from google.cloud.dataproc_v1beta2.types.clusters import Cluster +from google.cloud.dataproc_v1beta2.types.clusters import ClusterConfig +from google.cloud.dataproc_v1beta2.types.clusters import ClusterMetrics +from google.cloud.dataproc_v1beta2.types.clusters import ClusterStatus +from google.cloud.dataproc_v1beta2.types.clusters import CreateClusterRequest +from google.cloud.dataproc_v1beta2.types.clusters import DeleteClusterRequest +from google.cloud.dataproc_v1beta2.types.clusters import DiagnoseClusterRequest +from google.cloud.dataproc_v1beta2.types.clusters import DiagnoseClusterResults +from google.cloud.dataproc_v1beta2.types.clusters import DiskConfig +from google.cloud.dataproc_v1beta2.types.clusters import EncryptionConfig +from google.cloud.dataproc_v1beta2.types.clusters import EndpointConfig +from google.cloud.dataproc_v1beta2.types.clusters import GceClusterConfig +from google.cloud.dataproc_v1beta2.types.clusters import GetClusterRequest +from google.cloud.dataproc_v1beta2.types.clusters import GkeClusterConfig +from google.cloud.dataproc_v1beta2.types.clusters import InstanceGroupConfig +from google.cloud.dataproc_v1beta2.types.clusters import KerberosConfig +from google.cloud.dataproc_v1beta2.types.clusters import LifecycleConfig +from google.cloud.dataproc_v1beta2.types.clusters import ListClustersRequest +from google.cloud.dataproc_v1beta2.types.clusters import ListClustersResponse +from google.cloud.dataproc_v1beta2.types.clusters import 
ManagedGroupConfig +from google.cloud.dataproc_v1beta2.types.clusters import NodeInitializationAction +from google.cloud.dataproc_v1beta2.types.clusters import ReservationAffinity +from google.cloud.dataproc_v1beta2.types.clusters import SecurityConfig +from google.cloud.dataproc_v1beta2.types.clusters import SoftwareConfig +from google.cloud.dataproc_v1beta2.types.clusters import UpdateClusterRequest +from google.cloud.dataproc_v1beta2.types.jobs import CancelJobRequest +from google.cloud.dataproc_v1beta2.types.jobs import DeleteJobRequest +from google.cloud.dataproc_v1beta2.types.jobs import GetJobRequest +from google.cloud.dataproc_v1beta2.types.jobs import HadoopJob +from google.cloud.dataproc_v1beta2.types.jobs import HiveJob +from google.cloud.dataproc_v1beta2.types.jobs import Job +from google.cloud.dataproc_v1beta2.types.jobs import JobMetadata +from google.cloud.dataproc_v1beta2.types.jobs import JobPlacement +from google.cloud.dataproc_v1beta2.types.jobs import JobReference +from google.cloud.dataproc_v1beta2.types.jobs import JobScheduling +from google.cloud.dataproc_v1beta2.types.jobs import JobStatus +from google.cloud.dataproc_v1beta2.types.jobs import ListJobsRequest +from google.cloud.dataproc_v1beta2.types.jobs import ListJobsResponse +from google.cloud.dataproc_v1beta2.types.jobs import LoggingConfig +from google.cloud.dataproc_v1beta2.types.jobs import PigJob +from google.cloud.dataproc_v1beta2.types.jobs import PrestoJob +from google.cloud.dataproc_v1beta2.types.jobs import PySparkJob +from google.cloud.dataproc_v1beta2.types.jobs import QueryList +from google.cloud.dataproc_v1beta2.types.jobs import SparkJob +from google.cloud.dataproc_v1beta2.types.jobs import SparkRJob +from google.cloud.dataproc_v1beta2.types.jobs import SparkSqlJob +from google.cloud.dataproc_v1beta2.types.jobs import SubmitJobRequest +from google.cloud.dataproc_v1beta2.types.jobs import UpdateJobRequest +from google.cloud.dataproc_v1beta2.types.jobs import YarnApplication +from google.cloud.dataproc_v1beta2.types.operations import ClusterOperationMetadata +from google.cloud.dataproc_v1beta2.types.operations import ClusterOperationStatus +from google.cloud.dataproc_v1beta2.types.shared import Component +from google.cloud.dataproc_v1beta2.types.workflow_templates import ClusterOperation +from google.cloud.dataproc_v1beta2.types.workflow_templates import ClusterSelector +from google.cloud.dataproc_v1beta2.types.workflow_templates import CreateWorkflowTemplateRequest +from google.cloud.dataproc_v1beta2.types.workflow_templates import DeleteWorkflowTemplateRequest +from google.cloud.dataproc_v1beta2.types.workflow_templates import GetWorkflowTemplateRequest +from google.cloud.dataproc_v1beta2.types.workflow_templates import InstantiateInlineWorkflowTemplateRequest +from google.cloud.dataproc_v1beta2.types.workflow_templates import InstantiateWorkflowTemplateRequest +from google.cloud.dataproc_v1beta2.types.workflow_templates import ListWorkflowTemplatesRequest +from google.cloud.dataproc_v1beta2.types.workflow_templates import ListWorkflowTemplatesResponse +from google.cloud.dataproc_v1beta2.types.workflow_templates import ManagedCluster +from google.cloud.dataproc_v1beta2.types.workflow_templates import OrderedJob +from google.cloud.dataproc_v1beta2.types.workflow_templates import ParameterValidation +from google.cloud.dataproc_v1beta2.types.workflow_templates import RegexValidation +from google.cloud.dataproc_v1beta2.types.workflow_templates import TemplateParameter +from 
google.cloud.dataproc_v1beta2.types.workflow_templates import UpdateWorkflowTemplateRequest +from google.cloud.dataproc_v1beta2.types.workflow_templates import ValueValidation +from google.cloud.dataproc_v1beta2.types.workflow_templates import WorkflowGraph +from google.cloud.dataproc_v1beta2.types.workflow_templates import WorkflowMetadata +from google.cloud.dataproc_v1beta2.types.workflow_templates import WorkflowNode +from google.cloud.dataproc_v1beta2.types.workflow_templates import WorkflowTemplate +from google.cloud.dataproc_v1beta2.types.workflow_templates import WorkflowTemplatePlacement + +__all__ = ('AutoscalingPolicyServiceClient', + 'AutoscalingPolicyServiceAsyncClient', + 'ClusterControllerClient', + 'ClusterControllerAsyncClient', + 'JobControllerClient', + 'JobControllerAsyncClient', + 'WorkflowTemplateServiceClient', + 'WorkflowTemplateServiceAsyncClient', + 'AutoscalingPolicy', + 'BasicAutoscalingAlgorithm', + 'BasicYarnAutoscalingConfig', + 'CreateAutoscalingPolicyRequest', + 'DeleteAutoscalingPolicyRequest', + 'GetAutoscalingPolicyRequest', + 'InstanceGroupAutoscalingPolicyConfig', + 'ListAutoscalingPoliciesRequest', + 'ListAutoscalingPoliciesResponse', + 'UpdateAutoscalingPolicyRequest', + 'AcceleratorConfig', + 'AutoscalingConfig', + 'Cluster', + 'ClusterConfig', + 'ClusterMetrics', + 'ClusterStatus', + 'CreateClusterRequest', + 'DeleteClusterRequest', + 'DiagnoseClusterRequest', + 'DiagnoseClusterResults', + 'DiskConfig', + 'EncryptionConfig', + 'EndpointConfig', + 'GceClusterConfig', + 'GetClusterRequest', + 'GkeClusterConfig', + 'InstanceGroupConfig', + 'KerberosConfig', + 'LifecycleConfig', + 'ListClustersRequest', + 'ListClustersResponse', + 'ManagedGroupConfig', + 'NodeInitializationAction', + 'ReservationAffinity', + 'SecurityConfig', + 'SoftwareConfig', + 'UpdateClusterRequest', + 'CancelJobRequest', + 'DeleteJobRequest', + 'GetJobRequest', + 'HadoopJob', + 'HiveJob', + 'Job', + 'JobMetadata', + 'JobPlacement', + 'JobReference', + 'JobScheduling', + 'JobStatus', + 'ListJobsRequest', + 'ListJobsResponse', + 'LoggingConfig', + 'PigJob', + 'PrestoJob', + 'PySparkJob', + 'QueryList', + 'SparkJob', + 'SparkRJob', + 'SparkSqlJob', + 'SubmitJobRequest', + 'UpdateJobRequest', + 'YarnApplication', + 'ClusterOperationMetadata', + 'ClusterOperationStatus', + 'Component', + 'ClusterOperation', + 'ClusterSelector', + 'CreateWorkflowTemplateRequest', + 'DeleteWorkflowTemplateRequest', + 'GetWorkflowTemplateRequest', + 'InstantiateInlineWorkflowTemplateRequest', + 'InstantiateWorkflowTemplateRequest', + 'ListWorkflowTemplatesRequest', + 'ListWorkflowTemplatesResponse', + 'ManagedCluster', + 'OrderedJob', + 'ParameterValidation', + 'RegexValidation', + 'TemplateParameter', + 'UpdateWorkflowTemplateRequest', + 'ValueValidation', + 'WorkflowGraph', + 'WorkflowMetadata', + 'WorkflowNode', + 'WorkflowTemplate', + 'WorkflowTemplatePlacement', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc/py.typed b/owl-bot-staging/v1beta2/google/cloud/dataproc/py.typed new file mode 100644 index 00000000..aac99cba --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-dataproc package uses inline types. 
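Editorial usage sketch (not part of the generated patch): the re-exported names above can be exercised roughly as follows. The flattened `parent`/`policy` arguments are taken from the client code added later in this patch; the `AutoscalingPolicy` field names, the placeholder project/region values, and the regional `api_endpoint` are assumptions to be verified against `types/autoscaling_policies.py` and the service documentation.

# Illustrative only -- assumes application default credentials are available.
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.AutoscalingPolicyServiceClient(
    # Regional Dataproc resources are typically addressed through the matching
    # regional endpoint; this value is an assumption for the example.
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"},
)

policy = dataproc_v1beta2.AutoscalingPolicy(
    id="example-policy",  # hypothetical policy id
    worker_config=dataproc_v1beta2.InstanceGroupAutoscalingPolicyConfig(
        max_instances=10,
    ),
    basic_algorithm=dataproc_v1beta2.BasicAutoscalingAlgorithm(
        yarn_config=dataproc_v1beta2.BasicYarnAutoscalingConfig(
            graceful_decommission_timeout={"seconds": 600},
            scale_up_factor=0.5,
            scale_down_factor=0.5,
        ),
    ),
)

created = client.create_autoscaling_policy(
    parent="projects/my-project/regions/us-central1",  # placeholder values
    policy=policy,
)
print(created.name)

Note that the flattened keyword arguments are mutually exclusive with passing a full CreateAutoscalingPolicyRequest; as the generated client code below shows, supplying both raises ValueError.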
diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/__init__.py new file mode 100644 index 00000000..a143d99f --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/__init__.py @@ -0,0 +1,206 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .services.autoscaling_policy_service import AutoscalingPolicyServiceClient +from .services.autoscaling_policy_service import AutoscalingPolicyServiceAsyncClient +from .services.cluster_controller import ClusterControllerClient +from .services.cluster_controller import ClusterControllerAsyncClient +from .services.job_controller import JobControllerClient +from .services.job_controller import JobControllerAsyncClient +from .services.workflow_template_service import WorkflowTemplateServiceClient +from .services.workflow_template_service import WorkflowTemplateServiceAsyncClient + +from .types.autoscaling_policies import AutoscalingPolicy +from .types.autoscaling_policies import BasicAutoscalingAlgorithm +from .types.autoscaling_policies import BasicYarnAutoscalingConfig +from .types.autoscaling_policies import CreateAutoscalingPolicyRequest +from .types.autoscaling_policies import DeleteAutoscalingPolicyRequest +from .types.autoscaling_policies import GetAutoscalingPolicyRequest +from .types.autoscaling_policies import InstanceGroupAutoscalingPolicyConfig +from .types.autoscaling_policies import ListAutoscalingPoliciesRequest +from .types.autoscaling_policies import ListAutoscalingPoliciesResponse +from .types.autoscaling_policies import UpdateAutoscalingPolicyRequest +from .types.clusters import AcceleratorConfig +from .types.clusters import AutoscalingConfig +from .types.clusters import Cluster +from .types.clusters import ClusterConfig +from .types.clusters import ClusterMetrics +from .types.clusters import ClusterStatus +from .types.clusters import CreateClusterRequest +from .types.clusters import DeleteClusterRequest +from .types.clusters import DiagnoseClusterRequest +from .types.clusters import DiagnoseClusterResults +from .types.clusters import DiskConfig +from .types.clusters import EncryptionConfig +from .types.clusters import EndpointConfig +from .types.clusters import GceClusterConfig +from .types.clusters import GetClusterRequest +from .types.clusters import GkeClusterConfig +from .types.clusters import InstanceGroupConfig +from .types.clusters import KerberosConfig +from .types.clusters import LifecycleConfig +from .types.clusters import ListClustersRequest +from .types.clusters import ListClustersResponse +from .types.clusters import ManagedGroupConfig +from .types.clusters import NodeInitializationAction +from .types.clusters import ReservationAffinity +from .types.clusters import SecurityConfig +from .types.clusters import SoftwareConfig +from .types.clusters import UpdateClusterRequest +from .types.jobs import CancelJobRequest +from .types.jobs import DeleteJobRequest +from 
.types.jobs import GetJobRequest +from .types.jobs import HadoopJob +from .types.jobs import HiveJob +from .types.jobs import Job +from .types.jobs import JobMetadata +from .types.jobs import JobPlacement +from .types.jobs import JobReference +from .types.jobs import JobScheduling +from .types.jobs import JobStatus +from .types.jobs import ListJobsRequest +from .types.jobs import ListJobsResponse +from .types.jobs import LoggingConfig +from .types.jobs import PigJob +from .types.jobs import PrestoJob +from .types.jobs import PySparkJob +from .types.jobs import QueryList +from .types.jobs import SparkJob +from .types.jobs import SparkRJob +from .types.jobs import SparkSqlJob +from .types.jobs import SubmitJobRequest +from .types.jobs import UpdateJobRequest +from .types.jobs import YarnApplication +from .types.operations import ClusterOperationMetadata +from .types.operations import ClusterOperationStatus +from .types.shared import Component +from .types.workflow_templates import ClusterOperation +from .types.workflow_templates import ClusterSelector +from .types.workflow_templates import CreateWorkflowTemplateRequest +from .types.workflow_templates import DeleteWorkflowTemplateRequest +from .types.workflow_templates import GetWorkflowTemplateRequest +from .types.workflow_templates import InstantiateInlineWorkflowTemplateRequest +from .types.workflow_templates import InstantiateWorkflowTemplateRequest +from .types.workflow_templates import ListWorkflowTemplatesRequest +from .types.workflow_templates import ListWorkflowTemplatesResponse +from .types.workflow_templates import ManagedCluster +from .types.workflow_templates import OrderedJob +from .types.workflow_templates import ParameterValidation +from .types.workflow_templates import RegexValidation +from .types.workflow_templates import TemplateParameter +from .types.workflow_templates import UpdateWorkflowTemplateRequest +from .types.workflow_templates import ValueValidation +from .types.workflow_templates import WorkflowGraph +from .types.workflow_templates import WorkflowMetadata +from .types.workflow_templates import WorkflowNode +from .types.workflow_templates import WorkflowTemplate +from .types.workflow_templates import WorkflowTemplatePlacement + +__all__ = ( + 'AutoscalingPolicyServiceAsyncClient', + 'ClusterControllerAsyncClient', + 'JobControllerAsyncClient', + 'WorkflowTemplateServiceAsyncClient', +'AcceleratorConfig', +'AutoscalingConfig', +'AutoscalingPolicy', +'AutoscalingPolicyServiceClient', +'BasicAutoscalingAlgorithm', +'BasicYarnAutoscalingConfig', +'CancelJobRequest', +'Cluster', +'ClusterConfig', +'ClusterControllerClient', +'ClusterMetrics', +'ClusterOperation', +'ClusterOperationMetadata', +'ClusterOperationStatus', +'ClusterSelector', +'ClusterStatus', +'Component', +'CreateAutoscalingPolicyRequest', +'CreateClusterRequest', +'CreateWorkflowTemplateRequest', +'DeleteAutoscalingPolicyRequest', +'DeleteClusterRequest', +'DeleteJobRequest', +'DeleteWorkflowTemplateRequest', +'DiagnoseClusterRequest', +'DiagnoseClusterResults', +'DiskConfig', +'EncryptionConfig', +'EndpointConfig', +'GceClusterConfig', +'GetAutoscalingPolicyRequest', +'GetClusterRequest', +'GetJobRequest', +'GetWorkflowTemplateRequest', +'GkeClusterConfig', +'HadoopJob', +'HiveJob', +'InstanceGroupAutoscalingPolicyConfig', +'InstanceGroupConfig', +'InstantiateInlineWorkflowTemplateRequest', +'InstantiateWorkflowTemplateRequest', +'Job', +'JobControllerClient', +'JobMetadata', +'JobPlacement', +'JobReference', +'JobScheduling', +'JobStatus', 
+'KerberosConfig', +'LifecycleConfig', +'ListAutoscalingPoliciesRequest', +'ListAutoscalingPoliciesResponse', +'ListClustersRequest', +'ListClustersResponse', +'ListJobsRequest', +'ListJobsResponse', +'ListWorkflowTemplatesRequest', +'ListWorkflowTemplatesResponse', +'LoggingConfig', +'ManagedCluster', +'ManagedGroupConfig', +'NodeInitializationAction', +'OrderedJob', +'ParameterValidation', +'PigJob', +'PrestoJob', +'PySparkJob', +'QueryList', +'RegexValidation', +'ReservationAffinity', +'SecurityConfig', +'SoftwareConfig', +'SparkJob', +'SparkRJob', +'SparkSqlJob', +'SubmitJobRequest', +'TemplateParameter', +'UpdateAutoscalingPolicyRequest', +'UpdateClusterRequest', +'UpdateJobRequest', +'UpdateWorkflowTemplateRequest', +'ValueValidation', +'WorkflowGraph', +'WorkflowMetadata', +'WorkflowNode', +'WorkflowTemplate', +'WorkflowTemplatePlacement', +'WorkflowTemplateServiceClient', +'YarnApplication', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/gapic_metadata.json b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/gapic_metadata.json new file mode 100644 index 00000000..c20241a8 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/gapic_metadata.json @@ -0,0 +1,315 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.dataproc_v1beta2", + "protoPackage": "google.cloud.dataproc.v1beta2", + "schema": "1.0", + "services": { + "AutoscalingPolicyService": { + "clients": { + "grpc": { + "libraryClient": "AutoscalingPolicyServiceClient", + "rpcs": { + "CreateAutoscalingPolicy": { + "methods": [ + "create_autoscaling_policy" + ] + }, + "DeleteAutoscalingPolicy": { + "methods": [ + "delete_autoscaling_policy" + ] + }, + "GetAutoscalingPolicy": { + "methods": [ + "get_autoscaling_policy" + ] + }, + "ListAutoscalingPolicies": { + "methods": [ + "list_autoscaling_policies" + ] + }, + "UpdateAutoscalingPolicy": { + "methods": [ + "update_autoscaling_policy" + ] + } + } + }, + "grpc-async": { + "libraryClient": "AutoscalingPolicyServiceAsyncClient", + "rpcs": { + "CreateAutoscalingPolicy": { + "methods": [ + "create_autoscaling_policy" + ] + }, + "DeleteAutoscalingPolicy": { + "methods": [ + "delete_autoscaling_policy" + ] + }, + "GetAutoscalingPolicy": { + "methods": [ + "get_autoscaling_policy" + ] + }, + "ListAutoscalingPolicies": { + "methods": [ + "list_autoscaling_policies" + ] + }, + "UpdateAutoscalingPolicy": { + "methods": [ + "update_autoscaling_policy" + ] + } + } + } + } + }, + "ClusterController": { + "clients": { + "grpc": { + "libraryClient": "ClusterControllerClient", + "rpcs": { + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DiagnoseCluster": { + "methods": [ + "diagnose_cluster" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ClusterControllerAsyncClient", + "rpcs": { + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DiagnoseCluster": { + "methods": [ + "diagnose_cluster" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + } + } + } + } + }, + 
"JobController": { + "clients": { + "grpc": { + "libraryClient": "JobControllerClient", + "rpcs": { + "CancelJob": { + "methods": [ + "cancel_job" + ] + }, + "DeleteJob": { + "methods": [ + "delete_job" + ] + }, + "GetJob": { + "methods": [ + "get_job" + ] + }, + "ListJobs": { + "methods": [ + "list_jobs" + ] + }, + "SubmitJob": { + "methods": [ + "submit_job" + ] + }, + "SubmitJobAsOperation": { + "methods": [ + "submit_job_as_operation" + ] + }, + "UpdateJob": { + "methods": [ + "update_job" + ] + } + } + }, + "grpc-async": { + "libraryClient": "JobControllerAsyncClient", + "rpcs": { + "CancelJob": { + "methods": [ + "cancel_job" + ] + }, + "DeleteJob": { + "methods": [ + "delete_job" + ] + }, + "GetJob": { + "methods": [ + "get_job" + ] + }, + "ListJobs": { + "methods": [ + "list_jobs" + ] + }, + "SubmitJob": { + "methods": [ + "submit_job" + ] + }, + "SubmitJobAsOperation": { + "methods": [ + "submit_job_as_operation" + ] + }, + "UpdateJob": { + "methods": [ + "update_job" + ] + } + } + } + } + }, + "WorkflowTemplateService": { + "clients": { + "grpc": { + "libraryClient": "WorkflowTemplateServiceClient", + "rpcs": { + "CreateWorkflowTemplate": { + "methods": [ + "create_workflow_template" + ] + }, + "DeleteWorkflowTemplate": { + "methods": [ + "delete_workflow_template" + ] + }, + "GetWorkflowTemplate": { + "methods": [ + "get_workflow_template" + ] + }, + "InstantiateInlineWorkflowTemplate": { + "methods": [ + "instantiate_inline_workflow_template" + ] + }, + "InstantiateWorkflowTemplate": { + "methods": [ + "instantiate_workflow_template" + ] + }, + "ListWorkflowTemplates": { + "methods": [ + "list_workflow_templates" + ] + }, + "UpdateWorkflowTemplate": { + "methods": [ + "update_workflow_template" + ] + } + } + }, + "grpc-async": { + "libraryClient": "WorkflowTemplateServiceAsyncClient", + "rpcs": { + "CreateWorkflowTemplate": { + "methods": [ + "create_workflow_template" + ] + }, + "DeleteWorkflowTemplate": { + "methods": [ + "delete_workflow_template" + ] + }, + "GetWorkflowTemplate": { + "methods": [ + "get_workflow_template" + ] + }, + "InstantiateInlineWorkflowTemplate": { + "methods": [ + "instantiate_inline_workflow_template" + ] + }, + "InstantiateWorkflowTemplate": { + "methods": [ + "instantiate_workflow_template" + ] + }, + "ListWorkflowTemplates": { + "methods": [ + "list_workflow_templates" + ] + }, + "UpdateWorkflowTemplate": { + "methods": [ + "update_workflow_template" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/py.typed b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/py.typed new file mode 100644 index 00000000..aac99cba --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-dataproc package uses inline types. diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/__init__.py new file mode 100644 index 00000000..4de65971 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py new file mode 100644 index 00000000..2401da6f --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import AutoscalingPolicyServiceClient +from .async_client import AutoscalingPolicyServiceAsyncClient + +__all__ = ( + 'AutoscalingPolicyServiceClient', + 'AutoscalingPolicyServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py new file mode 100644 index 00000000..140c14cc --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py @@ -0,0 +1,623 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import pagers +from google.cloud.dataproc_v1beta2.types import autoscaling_policies +from .transports.base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport +from .client import AutoscalingPolicyServiceClient + + +class AutoscalingPolicyServiceAsyncClient: + """The API interface for managing autoscaling policies in the + Cloud Dataproc API. + """ + + _client: AutoscalingPolicyServiceClient + + DEFAULT_ENDPOINT = AutoscalingPolicyServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = AutoscalingPolicyServiceClient.DEFAULT_MTLS_ENDPOINT + + autoscaling_policy_path = staticmethod(AutoscalingPolicyServiceClient.autoscaling_policy_path) + parse_autoscaling_policy_path = staticmethod(AutoscalingPolicyServiceClient.parse_autoscaling_policy_path) + common_billing_account_path = staticmethod(AutoscalingPolicyServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(AutoscalingPolicyServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(AutoscalingPolicyServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_organization_path) + common_project_path = staticmethod(AutoscalingPolicyServiceClient.common_project_path) + parse_common_project_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_project_path) + common_location_path = staticmethod(AutoscalingPolicyServiceClient.common_location_path) + parse_common_location_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceAsyncClient: The constructed client. + """ + return AutoscalingPolicyServiceClient.from_service_account_info.__func__(AutoscalingPolicyServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceAsyncClient: The constructed client. 
+ """ + return AutoscalingPolicyServiceClient.from_service_account_file.__func__(AutoscalingPolicyServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> AutoscalingPolicyServiceTransport: + """Returns the transport used by the client instance. + + Returns: + AutoscalingPolicyServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(AutoscalingPolicyServiceClient).get_transport_class, type(AutoscalingPolicyServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, AutoscalingPolicyServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the autoscaling policy service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AutoscalingPolicyServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = AutoscalingPolicyServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_autoscaling_policy(self, + request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, + *, + parent: str = None, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Creates new autoscaling policy. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.CreateAutoscalingPolicyRequest`): + The request object. A request to create an autoscaling + policy. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.create``, + the resource name has the following format: + ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.autoscalingPolicies.create``, + the resource name has the following format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + policy (:class:`google.cloud.dataproc_v1beta2.types.AutoscalingPolicy`): + Required. The autoscaling policy to + create. + + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, policy]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_autoscaling_policy, + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_autoscaling_policy(self, + request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, + *, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.UpdateAutoscalingPolicyRequest`): + The request object. A request to update an autoscaling + policy. + policy (:class:`google.cloud.dataproc_v1beta2.types.AutoscalingPolicy`): + Required. The updated autoscaling + policy. + + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([policy]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_autoscaling_policy, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("policy.name", request.policy.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_autoscaling_policy(self, + request: autoscaling_policies.GetAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Retrieves autoscaling policy. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.GetAutoscalingPolicyRequest`): + The request object. A request to fetch an autoscaling + policy. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = autoscaling_policies.GetAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_autoscaling_policy, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_autoscaling_policies(self, + request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAutoscalingPoliciesAsyncPager: + r"""Lists autoscaling policies in the project. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest`): + The request object. A request to list autoscaling + policies in a project. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.list``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesAsyncPager: + A response to a request to list + autoscaling policies in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_autoscaling_policies, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAutoscalingPoliciesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_autoscaling_policy(self, + request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.DeleteAutoscalingPolicyRequest`): + The request object. A request to delete an autoscaling + policy. + Autoscaling policies in use by one or more clusters will + not be deleted. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For + ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_autoscaling_policy, + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "AutoscalingPolicyServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py new file mode 100644 index 00000000..17b376c4 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py @@ -0,0 +1,789 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import pagers +from google.cloud.dataproc_v1beta2.types import autoscaling_policies +from .transports.base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import AutoscalingPolicyServiceGrpcTransport +from .transports.grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport + + +class AutoscalingPolicyServiceClientMeta(type): + """Metaclass for the AutoscalingPolicyService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] + _transport_registry["grpc"] = AutoscalingPolicyServiceGrpcTransport + _transport_registry["grpc_asyncio"] = AutoscalingPolicyServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[AutoscalingPolicyServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class AutoscalingPolicyServiceClient(metaclass=AutoscalingPolicyServiceClientMeta): + """The API interface for managing autoscaling policies in the + Cloud Dataproc API. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> AutoscalingPolicyServiceTransport: + """Returns the transport used by the client instance. + + Returns: + AutoscalingPolicyServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def autoscaling_policy_path(project: str,location: str,autoscaling_policy: str,) -> str: + """Returns a fully-qualified autoscaling_policy string.""" + return "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format(project=project, location=location, autoscaling_policy=autoscaling_policy, ) + + @staticmethod + def parse_autoscaling_policy_path(path: str) -> Dict[str,str]: + """Parses a autoscaling_policy path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/autoscalingPolicies/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, AutoscalingPolicyServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the autoscaling policy service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, AutoscalingPolicyServiceTransport]): The + transport to use. 
If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, AutoscalingPolicyServiceTransport): + # transport is a AutoscalingPolicyServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_autoscaling_policy(self, + request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, + *, + parent: str = None, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Creates new autoscaling policy. + + Args: + request (google.cloud.dataproc_v1beta2.types.CreateAutoscalingPolicyRequest): + The request object. A request to create an autoscaling + policy. + parent (str): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.create``, + the resource name has the following format: + ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.autoscalingPolicies.create``, + the resource name has the following format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): + Required. The autoscaling policy to + create. + + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.CreateAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.CreateAutoscalingPolicyRequest): + request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_autoscaling_policy] + + # Certain fields should be provided within the metadata header; + # add these here. 
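+ # Illustrative note (not generated): for a parent such as
+ # "projects/my-project/regions/us-central1" (hypothetical values), this
+ # appends an ("x-goog-request-params", "parent=<url-encoded parent>")
+ # entry to the gRPC metadata so the backend can route the request.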
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_autoscaling_policy(self, + request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, + *, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Args: + request (google.cloud.dataproc_v1beta2.types.UpdateAutoscalingPolicyRequest): + The request object. A request to update an autoscaling + policy. + policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): + Required. The updated autoscaling + policy. + + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.UpdateAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.UpdateAutoscalingPolicyRequest): + request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_autoscaling_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("policy.name", request.policy.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_autoscaling_policy(self, + request: autoscaling_policies.GetAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Retrieves autoscaling policy. + + Args: + request (google.cloud.dataproc_v1beta2.types.GetAutoscalingPolicyRequest): + The request object. A request to fetch an autoscaling + policy. + name (str): + Required. 
The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.GetAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.GetAutoscalingPolicyRequest): + request = autoscaling_policies.GetAutoscalingPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_autoscaling_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_autoscaling_policies(self, + request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAutoscalingPoliciesPager: + r"""Lists autoscaling policies in the project. + + Args: + request (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest): + The request object. A request to list autoscaling + policies in a project. + parent (str): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.list``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesPager: + A response to a request to list + autoscaling policies in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.ListAutoscalingPoliciesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.ListAutoscalingPoliciesRequest): + request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_autoscaling_policies] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAutoscalingPoliciesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_autoscaling_policy(self, + request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Args: + request (google.cloud.dataproc_v1beta2.types.DeleteAutoscalingPolicyRequest): + The request object. A request to delete an autoscaling + policy. + Autoscaling policies in use by one or more clusters will + not be deleted. + name (str): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For + ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.DeleteAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.DeleteAutoscalingPolicyRequest): + request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_autoscaling_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "AutoscalingPolicyServiceClient", +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py new file mode 100644 index 00000000..58533089 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.dataproc_v1beta2.types import autoscaling_policies + + +class ListAutoscalingPoliciesPager: + """A pager for iterating through ``list_autoscaling_policies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``policies`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAutoscalingPolicies`` requests and continue to iterate + through the ``policies`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., autoscaling_policies.ListAutoscalingPoliciesResponse], + request: autoscaling_policies.ListAutoscalingPoliciesRequest, + response: autoscaling_policies.ListAutoscalingPoliciesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest): + The initial request object. + response (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[autoscaling_policies.AutoscalingPolicy]: + for page in self.pages: + yield from page.policies + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListAutoscalingPoliciesAsyncPager: + """A pager for iterating through ``list_autoscaling_policies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``policies`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAutoscalingPolicies`` requests and continue to iterate + through the ``policies`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
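+
+ A minimal usage sketch (not generated), assuming an async client named
+ ``client`` and a valid ``parent``:
+
+ pager = await client.list_autoscaling_policies(parent=parent)
+ async for policy in pager: # fetches further pages on demand
+ ... # each item is an AutoscalingPolicy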
+ """ + def __init__(self, + method: Callable[..., Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse]], + request: autoscaling_policies.ListAutoscalingPoliciesRequest, + response: autoscaling_policies.ListAutoscalingPoliciesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest): + The initial request object. + response (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[autoscaling_policies.AutoscalingPolicy]: + async def async_generator(): + async for page in self.pages: + for response in page.policies: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py new file mode 100644 index 00000000..55ea5b98 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import AutoscalingPolicyServiceTransport +from .grpc import AutoscalingPolicyServiceGrpcTransport +from .grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] +_transport_registry['grpc'] = AutoscalingPolicyServiceGrpcTransport +_transport_registry['grpc_asyncio'] = AutoscalingPolicyServiceGrpcAsyncIOTransport + +__all__ = ( + 'AutoscalingPolicyServiceTransport', + 'AutoscalingPolicyServiceGrpcTransport', + 'AutoscalingPolicyServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py new file mode 100644 index 00000000..8f916ab4 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py @@ -0,0 +1,246 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1beta2.types import autoscaling_policies +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-dataproc', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class AutoscalingPolicyServiceTransport(abc.ABC): + """Abstract transport class for AutoscalingPolicyService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'dataproc.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
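+ # Illustrative note (not generated): wrap_method attaches the defaults
+ # declared below, e.g. get_autoscaling_policy is retried on
+ # DeadlineExceeded / ServiceUnavailable with exponential backoff
+ # (initial 0.1s, max 60s, multiplier 1.3) within a 600s deadline.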
+ self._wrapped_methods = { + self.create_autoscaling_policy: gapic_v1.method.wrap_method( + self.create_autoscaling_policy, + default_timeout=600.0, + client_info=client_info, + ), + self.update_autoscaling_policy: gapic_v1.method.wrap_method( + self.update_autoscaling_policy, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.get_autoscaling_policy: gapic_v1.method.wrap_method( + self.get_autoscaling_policy, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.list_autoscaling_policies: gapic_v1.method.wrap_method( + self.list_autoscaling_policies, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.delete_autoscaling_policy: gapic_v1.method.wrap_method( + self.delete_autoscaling_policy, + default_timeout=600.0, + client_info=client_info, + ), + } + + @property + def create_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + Union[ + autoscaling_policies.AutoscalingPolicy, + Awaitable[autoscaling_policies.AutoscalingPolicy] + ]]: + raise NotImplementedError() + + @property + def update_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + Union[ + autoscaling_policies.AutoscalingPolicy, + Awaitable[autoscaling_policies.AutoscalingPolicy] + ]]: + raise NotImplementedError() + + @property + def get_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + Union[ + autoscaling_policies.AutoscalingPolicy, + Awaitable[autoscaling_policies.AutoscalingPolicy] + ]]: + raise NotImplementedError() + + @property + def list_autoscaling_policies(self) -> Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + Union[ + autoscaling_policies.ListAutoscalingPoliciesResponse, + Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse] + ]]: + raise NotImplementedError() + + @property + def delete_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.DeleteAutoscalingPolicyRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'AutoscalingPolicyServiceTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py new file mode 100644 index 00000000..5e1754bd --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py @@ -0,0 +1,363 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.dataproc_v1beta2.types import autoscaling_policies +from google.protobuf import empty_pb2 # type: ignore +from .base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO + + +class AutoscalingPolicyServiceGrpcTransport(AutoscalingPolicyServiceTransport): + """gRPC backend transport for AutoscalingPolicyService. + + The API interface for managing autoscaling policies in the + Cloud Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def create_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy]: + r"""Return a callable for the create autoscaling policy method over gRPC. + + Creates new autoscaling policy. + + Returns: + Callable[[~.CreateAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_autoscaling_policy' not in self._stubs: + self._stubs['create_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/CreateAutoscalingPolicy', + request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['create_autoscaling_policy'] + + @property + def update_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy]: + r"""Return a callable for the update autoscaling policy method over gRPC. + + Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Returns: + Callable[[~.UpdateAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
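+ # Illustrative note (not generated): the multicallable is created lazily
+ # on first property access and cached in self._stubs, so later accesses
+ # reuse the same grpc.UnaryUnaryMultiCallable.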
+ if 'update_autoscaling_policy' not in self._stubs: + self._stubs['update_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/UpdateAutoscalingPolicy', + request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['update_autoscaling_policy'] + + @property + def get_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy]: + r"""Return a callable for the get autoscaling policy method over gRPC. + + Retrieves autoscaling policy. + + Returns: + Callable[[~.GetAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_autoscaling_policy' not in self._stubs: + self._stubs['get_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/GetAutoscalingPolicy', + request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['get_autoscaling_policy'] + + @property + def list_autoscaling_policies(self) -> Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + autoscaling_policies.ListAutoscalingPoliciesResponse]: + r"""Return a callable for the list autoscaling policies method over gRPC. + + Lists autoscaling policies in the project. + + Returns: + Callable[[~.ListAutoscalingPoliciesRequest], + ~.ListAutoscalingPoliciesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_autoscaling_policies' not in self._stubs: + self._stubs['list_autoscaling_policies'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/ListAutoscalingPolicies', + request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, + response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, + ) + return self._stubs['list_autoscaling_policies'] + + @property + def delete_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.DeleteAutoscalingPolicyRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete autoscaling policy method over gRPC. + + Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Returns: + Callable[[~.DeleteAutoscalingPolicyRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_autoscaling_policy' not in self._stubs: + self._stubs['delete_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/DeleteAutoscalingPolicy', + request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_autoscaling_policy'] + + +__all__ = ( + 'AutoscalingPolicyServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..a7fc6b5c --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py @@ -0,0 +1,367 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1beta2.types import autoscaling_policies +from google.protobuf import empty_pb2 # type: ignore +from .base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import AutoscalingPolicyServiceGrpcTransport + + +class AutoscalingPolicyServiceGrpcAsyncIOTransport(AutoscalingPolicyServiceTransport): + """gRPC AsyncIO backend transport for AutoscalingPolicyService. + + The API interface for managing autoscaling policies in the + Cloud Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def create_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy]]: + r"""Return a callable for the create autoscaling policy method over gRPC. + + Creates new autoscaling policy. + + Returns: + Callable[[~.CreateAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
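+        # The resulting stub is cached in ``self._stubs`` so that repeated
+        # property access reuses the same callable instead of registering a
+        # new one on the channel each time.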
+ if 'create_autoscaling_policy' not in self._stubs: + self._stubs['create_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/CreateAutoscalingPolicy', + request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['create_autoscaling_policy'] + + @property + def update_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy]]: + r"""Return a callable for the update autoscaling policy method over gRPC. + + Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Returns: + Callable[[~.UpdateAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_autoscaling_policy' not in self._stubs: + self._stubs['update_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/UpdateAutoscalingPolicy', + request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['update_autoscaling_policy'] + + @property + def get_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy]]: + r"""Return a callable for the get autoscaling policy method over gRPC. + + Retrieves autoscaling policy. + + Returns: + Callable[[~.GetAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_autoscaling_policy' not in self._stubs: + self._stubs['get_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/GetAutoscalingPolicy', + request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs['get_autoscaling_policy'] + + @property + def list_autoscaling_policies(self) -> Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse]]: + r"""Return a callable for the list autoscaling policies method over gRPC. + + Lists autoscaling policies in the project. + + Returns: + Callable[[~.ListAutoscalingPoliciesRequest], + Awaitable[~.ListAutoscalingPoliciesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_autoscaling_policies' not in self._stubs: + self._stubs['list_autoscaling_policies'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/ListAutoscalingPolicies', + request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, + response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, + ) + return self._stubs['list_autoscaling_policies'] + + @property + def delete_autoscaling_policy(self) -> Callable[ + [autoscaling_policies.DeleteAutoscalingPolicyRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete autoscaling policy method over gRPC. + + Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Returns: + Callable[[~.DeleteAutoscalingPolicyRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_autoscaling_policy' not in self._stubs: + self._stubs['delete_autoscaling_policy'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/DeleteAutoscalingPolicy', + request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_autoscaling_policy'] + + +__all__ = ( + 'AutoscalingPolicyServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py new file mode 100644 index 00000000..4b4a11d5 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ClusterControllerClient +from .async_client import ClusterControllerAsyncClient + +__all__ = ( + 'ClusterControllerClient', + 'ClusterControllerAsyncClient', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py new file mode 100644 index 00000000..4921e9e6 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py @@ -0,0 +1,923 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1beta2.services.cluster_controller import pagers +from google.cloud.dataproc_v1beta2.types import clusters +from google.cloud.dataproc_v1beta2.types import operations +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import ClusterControllerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport +from .client import ClusterControllerClient + + +class ClusterControllerAsyncClient: + """The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + """ + + _client: ClusterControllerClient + + DEFAULT_ENDPOINT = ClusterControllerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ClusterControllerClient.DEFAULT_MTLS_ENDPOINT + + cluster_path = staticmethod(ClusterControllerClient.cluster_path) + parse_cluster_path = staticmethod(ClusterControllerClient.parse_cluster_path) + common_billing_account_path = staticmethod(ClusterControllerClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(ClusterControllerClient.parse_common_billing_account_path) + common_folder_path = staticmethod(ClusterControllerClient.common_folder_path) + parse_common_folder_path = staticmethod(ClusterControllerClient.parse_common_folder_path) + common_organization_path = staticmethod(ClusterControllerClient.common_organization_path) + parse_common_organization_path = staticmethod(ClusterControllerClient.parse_common_organization_path) + common_project_path = staticmethod(ClusterControllerClient.common_project_path) + parse_common_project_path = staticmethod(ClusterControllerClient.parse_common_project_path) + common_location_path = staticmethod(ClusterControllerClient.common_location_path) + parse_common_location_path = staticmethod(ClusterControllerClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerAsyncClient: The constructed client. 
+ """ + return ClusterControllerClient.from_service_account_info.__func__(ClusterControllerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerAsyncClient: The constructed client. + """ + return ClusterControllerClient.from_service_account_file.__func__(ClusterControllerAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ClusterControllerTransport: + """Returns the transport used by the client instance. + + Returns: + ClusterControllerTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(ClusterControllerClient).get_transport_class, type(ClusterControllerClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, ClusterControllerTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the cluster controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ClusterControllerClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_cluster(self, + request: clusters.CreateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster: clusters.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a cluster in a project. 
The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.CreateClusterRequest`): + The request object. A request to create a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`google.cloud.dataproc_v1beta2.types.Cluster`): + Required. The cluster to create. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def update_cluster(self, + request: clusters.UpdateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + cluster: clusters.Cluster = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a cluster in a project. 
The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.UpdateClusterRequest`): + The request object. A request to update a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project the cluster belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`google.cloud.dataproc_v1beta2.types.Cluster`): + Required. The changes to the cluster. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Specifies the path, relative to ``Cluster``, + of the field to update. For example, to change the + number of workers in a cluster to 5, the ``update_mask`` + parameter would be specified as + ``config.worker_config.num_instances``, and the + ``PATCH`` request body would specify the new value, as + follows: + + :: + + { + "config":{ + "workerConfig":{ + "numInstances":"5" + } + } + } + + Similarly, to change the number of preemptible workers + in a cluster to 5, the ``update_mask`` parameter would + be ``config.secondary_worker_config.num_instances``, and + the ``PATCH`` request body would be set as follows: + + :: + + { + "config":{ + "secondaryWorkerConfig":{ + "numInstances":"5" + } + } + } + + Note: currently only the following fields can be + updated: + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + +
+                     <table>
+                     <tr>
+                     <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+                     </tr>
+                     <tr>
+                     <td>labels</td><td>Updates labels</td>
+                     </tr>
+                     <tr>
+                     <td>config.worker_config.num_instances</td><td>Resize primary worker
+                     group</td>
+                     </tr>
+                     <tr>
+                     <td>config.secondary_worker_config.num_instances</td><td>Resize secondary
+                     worker group</td>
+                     </tr>
+                     <tr>
+                     <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL
+                     duration</td>
+                     </tr>
+                     <tr>
+                     <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL
+                     deletion timestamp</td>
+                     </tr>
+                     <tr>
+                     <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL
+                     duration</td>
+                     </tr>
+                     <tr>
+                     <td>config.autoscaling_config.policy_uri</td><td>Use, stop using, or change
+                     autoscaling policies</td>
+                     </tr>
+                     </table>
+ + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name, cluster, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_cluster(self, + request: clusters.DeleteClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.DeleteClusterRequest`): + The request object. A request to delete a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. 
The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_cluster(self, + request: clusters.GetClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> clusters.Cluster: + r"""Gets the resource representation for a cluster in a + project. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.GetClusterRequest`): + The request object. Request to get the resource + representation for a cluster in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.Cluster: + Describes the identifying + information, config, and status of a + cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_clusters(self, + request: clusters.ListClustersRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListClustersAsyncPager: + r"""Lists all regions/{region}/clusters in a project + alphabetically. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.ListClustersRequest`): + The request object. A request to list the clusters in a + project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the clusters to list. + Filters are case-sensitive and have the following + syntax: + + field = value [AND [field = value]] ... + + where **field** is one of ``status.state``, + ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a + label key. **value** can be ``*`` to match all values. 
+ ``status.state`` can be one of the following: + ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, + ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` + contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` + states. ``INACTIVE`` contains the ``DELETING`` and + ``ERROR`` states. ``clusterName`` is the name of the + cluster provided at creation time. Only the logical + ``AND`` operator is supported; space-separated items are + treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.services.cluster_controller.pagers.ListClustersAsyncPager: + The list of all clusters in a + project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_clusters, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListClustersAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def diagnose_cluster(self, + request: clusters.DiagnoseClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains [Empty][google.protobuf.Empty]. 
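+
+        A minimal usage sketch (illustrative only; the project, region,
+        and cluster names below are placeholders, and in practice a
+        regional endpoint is usually configured via ``client_options``)::
+
+            from google.cloud import dataproc_v1beta2
+
+            async def run_diagnostics():
+                client = dataproc_v1beta2.ClusterControllerAsyncClient()
+                operation = await client.diagnose_cluster(
+                    project_id="my-project",
+                    region="us-central1",
+                    cluster_name="my-cluster",
+                )
+                # Block until the long-running diagnose operation completes.
+                await operation.result()
+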
+ + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.DiagnoseClusterRequest`): + The request object. A request to collect cluster + diagnostic information. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = clusters.DiagnoseClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.diagnose_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. 
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ClusterControllerAsyncClient", +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py new file mode 100644 index 00000000..dbfd3969 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py @@ -0,0 +1,1070 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1beta2.services.cluster_controller import pagers +from google.cloud.dataproc_v1beta2.types import clusters +from google.cloud.dataproc_v1beta2.types import operations +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import ClusterControllerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import ClusterControllerGrpcTransport +from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport + + +class ClusterControllerClientMeta(type): + """Metaclass for the ClusterController client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ClusterControllerTransport]] + _transport_registry["grpc"] = ClusterControllerGrpcTransport + _transport_registry["grpc_asyncio"] = ClusterControllerGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ClusterControllerTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. 
+ """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ClusterControllerClient(metaclass=ClusterControllerClientMeta): + """The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ClusterControllerTransport: + """Returns the transport used by the client instance. + + Returns: + ClusterControllerTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def cluster_path(project: str,location: str,cluster: str,) -> str: + """Returns a fully-qualified cluster string.""" + return "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) + + @staticmethod + def parse_cluster_path(path: str) -> Dict[str,str]: + """Parses a cluster path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/clusters/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ClusterControllerTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the cluster controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ClusterControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ClusterControllerTransport): + # transport is a ClusterControllerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_cluster(self, + request: clusters.CreateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster: clusters.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (google.cloud.dataproc_v1beta2.types.CreateClusterRequest): + The request object. A request to create a cluster. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (google.cloud.dataproc_v1beta2.types.Cluster): + Required. The cluster to create. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.CreateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.CreateClusterRequest): + request = clusters.CreateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_cluster] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def update_cluster(self, + request: clusters.UpdateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + cluster: clusters.Cluster = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (google.cloud.dataproc_v1beta2.types.UpdateClusterRequest): + The request object. A request to update a cluster. + project_id (str): + Required. The ID of the Google Cloud + Platform project the cluster belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (str): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (google.cloud.dataproc_v1beta2.types.Cluster): + Required. The changes to the cluster. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Specifies the path, relative to ``Cluster``, + of the field to update. For example, to change the + number of workers in a cluster to 5, the ``update_mask`` + parameter would be specified as + ``config.worker_config.num_instances``, and the + ``PATCH`` request body would specify the new value, as + follows: + + :: + + { + "config":{ + "workerConfig":{ + "numInstances":"5" + } + } + } + + Similarly, to change the number of preemptible workers + in a cluster to 5, the ``update_mask`` parameter would + be ``config.secondary_worker_config.num_instances``, and + the ``PATCH`` request body would be set as follows: + + :: + + { + "config":{ + "secondaryWorkerConfig":{ + "numInstances":"5" + } + } + } + + Note: currently only the following fields can be + updated: + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + +
+                  <table>
+                  <tr>
+                  <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
+                  </tr>
+                  <tr>
+                  <td>labels</td><td>Updates labels</td>
+                  </tr>
+                  <tr>
+                  <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+                  </tr>
+                  <tr>
+                  <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+                  </tr>
+                  <tr>
+                  <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+                  </tr>
+                  <tr>
+                  <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+                  </tr>
+                  <tr>
+                  <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+                  </tr>
+                  <tr>
+                  <td>config.autoscaling_config.policy_uri</td><td>Use, stop using, or change autoscaling policies</td>
+                  </tr>
+                  </table>
+ + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name, cluster, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.UpdateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.UpdateClusterRequest): + request = clusters.UpdateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_cluster] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_cluster(self, + request: clusters.DeleteClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (google.cloud.dataproc_v1beta2.types.DeleteClusterRequest): + The request object. A request to delete a cluster. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (str): + Required. The cluster name. 
+ This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.DeleteClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.DeleteClusterRequest): + request = clusters.DeleteClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def get_cluster(self, + request: clusters.GetClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> clusters.Cluster: + r"""Gets the resource representation for a cluster in a + project. + + Args: + request (google.cloud.dataproc_v1beta2.types.GetClusterRequest): + The request object. Request to get the resource + representation for a cluster in a project. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (str): + Required. The cluster name. 
+ This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.Cluster: + Describes the identifying + information, config, and status of a + cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.GetClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.GetClusterRequest): + request = clusters.GetClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_cluster] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_clusters(self, + request: clusters.ListClustersRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListClustersPager: + r"""Lists all regions/{region}/clusters in a project + alphabetically. + + Args: + request (google.cloud.dataproc_v1beta2.types.ListClustersRequest): + The request object. A request to list the clusters in a + project. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (str): + Optional. A filter constraining the clusters to list. + Filters are case-sensitive and have the following + syntax: + + field = value [AND [field = value]] ... + + where **field** is one of ``status.state``, + ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a + label key. **value** can be ``*`` to match all values. + ``status.state`` can be one of the following: + ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, + ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` + contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` + states. ``INACTIVE`` contains the ``DELETING`` and + ``ERROR`` states. 
``clusterName`` is the name of the + cluster provided at creation time. Only the logical + ``AND`` operator is supported; space-separated items are + treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.services.cluster_controller.pagers.ListClustersPager: + The list of all clusters in a + project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.ListClustersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.ListClustersRequest): + request = clusters.ListClustersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_clusters] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListClustersPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def diagnose_cluster(self, + request: clusters.DiagnoseClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains [Empty][google.protobuf.Empty]. + + Args: + request (google.cloud.dataproc_v1beta2.types.DiagnoseClusterRequest): + The request object. A request to collect cluster + diagnostic information. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. 
The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (str): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.DiagnoseClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.DiagnoseClusterRequest): + request = clusters.DiagnoseClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.diagnose_cluster] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. 
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ClusterControllerClient", +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py new file mode 100644 index 00000000..10f0cc72 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.dataproc_v1beta2.types import clusters + + +class ListClustersPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` object, and + provides an ``__iter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., clusters.ListClustersResponse], + request: clusters.ListClustersRequest, + response: clusters.ListClustersResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1beta2.types.ListClustersRequest): + The initial request object. + response (google.cloud.dataproc_v1beta2.types.ListClustersResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
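+
+        Example:
+            A minimal, illustrative sketch of how this pager is normally
+            obtained and consumed; the project and region values below are
+            assumptions, not part of this change::
+
+                from google.cloud import dataproc_v1beta2
+
+                client = dataproc_v1beta2.ClusterControllerClient()
+                # list_clusters returns a ListClustersPager; iterating it
+                # transparently fetches additional pages.
+                for cluster in client.list_clusters(
+                    project_id="my-project",   # assumed project ID
+                    region="us-central1",      # assumed region
+                ):
+                    print(cluster.cluster_name)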
+ """ + self._method = method + self._request = clusters.ListClustersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[clusters.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[clusters.Cluster]: + for page in self.pages: + yield from page.clusters + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListClustersAsyncPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[clusters.ListClustersResponse]], + request: clusters.ListClustersRequest, + response: clusters.ListClustersResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1beta2.types.ListClustersRequest): + The initial request object. + response (google.cloud.dataproc_v1beta2.types.ListClustersResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = clusters.ListClustersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[clusters.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[clusters.Cluster]: + async def async_generator(): + async for page in self.pages: + for response in page.clusters: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py new file mode 100644 index 00000000..9c44d271 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ClusterControllerTransport +from .grpc import ClusterControllerGrpcTransport +from .grpc_asyncio import ClusterControllerGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterControllerTransport]] +_transport_registry['grpc'] = ClusterControllerGrpcTransport +_transport_registry['grpc_asyncio'] = ClusterControllerGrpcAsyncIOTransport + +__all__ = ( + 'ClusterControllerTransport', + 'ClusterControllerGrpcTransport', + 'ClusterControllerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py new file mode 100644 index 00000000..d4a71b3e --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py @@ -0,0 +1,285 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1beta2.types import clusters +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-dataproc', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class ClusterControllerTransport(abc.ABC): + """Abstract transport class for ClusterController.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'dataproc.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_cluster: gapic_v1.method.wrap_method( + self.create_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + self.update_cluster: gapic_v1.method.wrap_method( + self.update_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + self.delete_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + self.get_cluster: gapic_v1.method.wrap_method( + self.get_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + self.list_clusters: gapic_v1.method.wrap_method( + self.list_clusters, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + self.diagnose_cluster: gapic_v1.method.wrap_method( + self.diagnose_cluster, + 
default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=300.0, + ), + default_timeout=300.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_cluster(self) -> Callable[ + [clusters.CreateClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def update_cluster(self) -> Callable[ + [clusters.UpdateClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_cluster(self) -> Callable[ + [clusters.DeleteClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_cluster(self) -> Callable[ + [clusters.GetClusterRequest], + Union[ + clusters.Cluster, + Awaitable[clusters.Cluster] + ]]: + raise NotImplementedError() + + @property + def list_clusters(self) -> Callable[ + [clusters.ListClustersRequest], + Union[ + clusters.ListClustersResponse, + Awaitable[clusters.ListClustersResponse] + ]]: + raise NotImplementedError() + + @property + def diagnose_cluster(self) -> Callable[ + [clusters.DiagnoseClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ClusterControllerTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py new file mode 100644 index 00000000..c7429251 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py @@ -0,0 +1,419 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.dataproc_v1beta2.types import clusters +from google.longrunning import operations_pb2 # type: ignore +from .base import ClusterControllerTransport, DEFAULT_CLIENT_INFO + + +class ClusterControllerGrpcTransport(ClusterControllerTransport): + """gRPC backend transport for ClusterController. + + The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
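+
+        Example:
+            A hedged sketch of supplying this transport explicitly; in most
+            cases the client constructs it for you, and the host shown is
+            simply the service default::
+
+                from google.cloud.dataproc_v1beta2.services.cluster_controller import (
+                    ClusterControllerClient,
+                )
+                from google.cloud.dataproc_v1beta2.services.cluster_controller.transports import (
+                    ClusterControllerGrpcTransport,
+                )
+
+                # Credentials are resolved from the environment (ADC) here.
+                transport = ClusterControllerGrpcTransport(host="dataproc.googleapis.com")
+                client = ClusterControllerClient(transport=transport)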
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_cluster(self) -> Callable[ + [clusters.CreateClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_cluster' not in self._stubs: + self._stubs['create_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster', + request_serializer=clusters.CreateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_cluster'] + + @property + def update_cluster(self) -> Callable[ + [clusters.UpdateClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.UpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_cluster' not in self._stubs: + self._stubs['update_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster', + request_serializer=clusters.UpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_cluster'] + + @property + def delete_cluster(self) -> Callable[ + [clusters.DeleteClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.DeleteClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_cluster' not in self._stubs: + self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster', + request_serializer=clusters.DeleteClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_cluster'] + + @property + def get_cluster(self) -> Callable[ + [clusters.GetClusterRequest], + clusters.Cluster]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the resource representation for a cluster in a + project. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_cluster' not in self._stubs: + self._stubs['get_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/GetCluster', + request_serializer=clusters.GetClusterRequest.serialize, + response_deserializer=clusters.Cluster.deserialize, + ) + return self._stubs['get_cluster'] + + @property + def list_clusters(self) -> Callable[ + [clusters.ListClustersRequest], + clusters.ListClustersResponse]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all regions/{region}/clusters in a project + alphabetically. + + Returns: + Callable[[~.ListClustersRequest], + ~.ListClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/ListClusters', + request_serializer=clusters.ListClustersRequest.serialize, + response_deserializer=clusters.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def diagnose_cluster(self) -> Callable[ + [clusters.DiagnoseClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the diagnose cluster method over gRPC. + + Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.DiagnoseClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'diagnose_cluster' not in self._stubs: + self._stubs['diagnose_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster', + request_serializer=clusters.DiagnoseClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['diagnose_cluster'] + + +__all__ = ( + 'ClusterControllerGrpcTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py new file mode 100644 index 00000000..f4445c89 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py @@ -0,0 +1,423 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1beta2.types import clusters +from google.longrunning import operations_pb2 # type: ignore +from .base import ClusterControllerTransport, DEFAULT_CLIENT_INFO +from .grpc import ClusterControllerGrpcTransport + + +class ClusterControllerGrpcAsyncIOTransport(ClusterControllerTransport): + """gRPC AsyncIO backend transport for ClusterController. + + The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests.
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_cluster(self) -> Callable[ + [clusters.CreateClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster in a project. 
The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_cluster' not in self._stubs: + self._stubs['create_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster', + request_serializer=clusters.CreateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_cluster'] + + @property + def update_cluster(self) -> Callable[ + [clusters.UpdateClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.UpdateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_cluster' not in self._stubs: + self._stubs['update_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster', + request_serializer=clusters.UpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_cluster'] + + @property + def delete_cluster(self) -> Callable[ + [clusters.DeleteClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.DeleteClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_cluster' not in self._stubs: + self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster', + request_serializer=clusters.DeleteClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_cluster'] + + @property + def get_cluster(self) -> Callable[ + [clusters.GetClusterRequest], + Awaitable[clusters.Cluster]]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the resource representation for a cluster in a + project. + + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_cluster' not in self._stubs: + self._stubs['get_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/GetCluster', + request_serializer=clusters.GetClusterRequest.serialize, + response_deserializer=clusters.Cluster.deserialize, + ) + return self._stubs['get_cluster'] + + @property + def list_clusters(self) -> Callable[ + [clusters.ListClustersRequest], + Awaitable[clusters.ListClustersResponse]]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all regions/{region}/clusters in a project + alphabetically. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/ListClusters', + request_serializer=clusters.ListClustersRequest.serialize, + response_deserializer=clusters.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def diagnose_cluster(self) -> Callable[ + [clusters.DiagnoseClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the diagnose cluster method over gRPC. + + Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.DiagnoseClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'diagnose_cluster' not in self._stubs: + self._stubs['diagnose_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster', + request_serializer=clusters.DiagnoseClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['diagnose_cluster'] + + +__all__ = ( + 'ClusterControllerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/__init__.py new file mode 100644 index 00000000..19ac5a98 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
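Every stub property in the ClusterController transports above repeats the same lazily-created, cached-callable idiom. A minimal standalone sketch of that idiom (hypothetical service path and method names, not part of the generated patch) is:

    # Illustrative sketch only: one gRPC callable is built per RPC on first
    # access and then reused from the cache on every later access.
    from typing import Callable, Dict

    class CachedStubSketch:
        def __init__(self, channel):
            # "channel" is assumed to expose unary_unary(), as gRPC channels do.
            self._channel = channel
            self._stubs: Dict[str, Callable] = {}

        @property
        def get_widget(self) -> Callable:
            if 'get_widget' not in self._stubs:
                self._stubs['get_widget'] = self._channel.unary_unary(
                    '/example.v1.WidgetService/GetWidget',  # hypothetical method path
                )
            return self._stubs['get_widget']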
+# +from .client import JobControllerClient +from .async_client import JobControllerAsyncClient + +__all__ = ( + 'JobControllerClient', + 'JobControllerAsyncClient', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py new file mode 100644 index 00000000..15c53d3e --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py @@ -0,0 +1,796 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1beta2.services.job_controller import pagers +from google.cloud.dataproc_v1beta2.types import jobs +from .transports.base import JobControllerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport +from .client import JobControllerClient + + +class JobControllerAsyncClient: + """The JobController provides methods to manage jobs.""" + + _client: JobControllerClient + + DEFAULT_ENDPOINT = JobControllerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = JobControllerClient.DEFAULT_MTLS_ENDPOINT + + common_billing_account_path = staticmethod(JobControllerClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(JobControllerClient.parse_common_billing_account_path) + common_folder_path = staticmethod(JobControllerClient.common_folder_path) + parse_common_folder_path = staticmethod(JobControllerClient.parse_common_folder_path) + common_organization_path = staticmethod(JobControllerClient.common_organization_path) + parse_common_organization_path = staticmethod(JobControllerClient.parse_common_organization_path) + common_project_path = staticmethod(JobControllerClient.common_project_path) + parse_common_project_path = staticmethod(JobControllerClient.parse_common_project_path) + common_location_path = staticmethod(JobControllerClient.common_location_path) + parse_common_location_path = staticmethod(JobControllerClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. 
+ args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerAsyncClient: The constructed client. + """ + return JobControllerClient.from_service_account_info.__func__(JobControllerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerAsyncClient: The constructed client. + """ + return JobControllerClient.from_service_account_file.__func__(JobControllerAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> JobControllerTransport: + """Returns the transport used by the client instance. + + Returns: + JobControllerTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(JobControllerClient).get_transport_class, type(JobControllerClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, JobControllerTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the job controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.JobControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
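As a usage sketch for the async client being defined here (not part of the generated patch; the project, region, cluster, and bucket names are placeholders, and the Job fields are assumed from the v1beta2 types):

    # Illustrative sketch only; assumes Application Default Credentials are
    # configured and that the v1beta2 Job message exposes placement/pyspark_job
    # fields as named here.
    import asyncio
    from google.cloud import dataproc_v1beta2

    async def main():
        client = dataproc_v1beta2.JobControllerAsyncClient()
        job = dataproc_v1beta2.Job(
            placement=dataproc_v1beta2.JobPlacement(cluster_name="example-cluster"),
            pyspark_job=dataproc_v1beta2.PySparkJob(
                main_python_file_uri="gs://example-bucket/word_count.py",
            ),
        )
        submitted = await client.submit_job(
            project_id="example-project", region="us-central1", job=job,
        )
        print(submitted.reference.job_id)

    asyncio.run(main())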
+ """ + self._client = JobControllerClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def submit_job(self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Submits a job to a cluster. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.SubmitJobRequest`): + The request object. A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`google.cloud.dataproc_v1beta2.types.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.submit_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def submit_job_as_operation(self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Submits job to a cluster. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.SubmitJobRequest`): + The request object. A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. 
+ + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`google.cloud.dataproc_v1beta2.types.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.dataproc_v1beta2.types.Job` A + Dataproc job resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.submit_job_as_operation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + jobs.Job, + metadata_type=jobs.JobMetadata, + ) + + # Done; return the response. + return response + + async def get_job(self, + request: jobs.GetJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Gets the resource representation for a job in a + project. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.GetJobRequest`): + The request object. A request to get the resource + representation for a job in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.GetJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_jobs(self, + request: jobs.ListJobsRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListJobsAsyncPager: + r"""Lists regions/{region}/jobs in a project. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.ListJobsRequest`): + The request object. A request to list jobs in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the jobs to list. + Filters are case-sensitive and have the following + syntax: + + [field = value] AND [field [= value]] ... + + where **field** is ``status.state`` or ``labels.[KEY]``, + and ``[KEY]`` is a label key. **value** can be ``*`` to + match all values. ``status.state`` can be either + ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` + operator is supported; space-separated items are treated + as having an implicit ``AND`` operator. 
+ + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.services.job_controller.pagers.ListJobsAsyncPager: + A list of jobs in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.ListJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_jobs, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_job(self, + request: jobs.UpdateJobRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Updates a job in a project. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.UpdateJobRequest`): + The request object. A request to update a job. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + request = jobs.UpdateJobRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def cancel_job(self, + request: jobs.CancelJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.CancelJobRequest`): + The request object. A request to cancel a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.CancelJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_job(self, + request: jobs.DeleteJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.DeleteJobRequest`): + The request object. A request to delete a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = jobs.DeleteJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "JobControllerAsyncClient", +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/client.py new file mode 100644 index 00000000..024cf2fd --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/client.py @@ -0,0 +1,927 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1beta2.services.job_controller import pagers +from google.cloud.dataproc_v1beta2.types import jobs +from .transports.base import JobControllerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import JobControllerGrpcTransport +from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport + + +class JobControllerClientMeta(type): + """Metaclass for the JobController client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[JobControllerTransport]] + _transport_registry["grpc"] = JobControllerGrpcTransport + _transport_registry["grpc_asyncio"] = JobControllerGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[JobControllerTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class JobControllerClient(metaclass=JobControllerClientMeta): + """The JobController provides methods to manage jobs.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> JobControllerTransport: + """Returns the transport used by the client instance. + + Returns: + JobControllerTransport: The transport used by the client + instance.
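As a quick illustration of the endpoint-conversion helper and the service-account constructors above (a sketch only, not part of the generated patch; the key file path is a placeholder):

    # Illustrative sketch only: expected behavior of _get_default_mtls_endpoint,
    # plus constructing a client from a JSON key file (placeholder path).
    from google.cloud.dataproc_v1beta2 import JobControllerClient

    assert JobControllerClient._get_default_mtls_endpoint(
        "dataproc.googleapis.com") == "dataproc.mtls.googleapis.com"
    assert JobControllerClient._get_default_mtls_endpoint(
        "dataproc.sandbox.googleapis.com") == "dataproc.mtls.sandbox.googleapis.com"

    client = JobControllerClient.from_service_account_file("service-account.json")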
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse an organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, JobControllerTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the job controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, JobControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value).
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, JobControllerTransport): + # transport is a JobControllerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def submit_job(self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Submits a job to a cluster. + + Args: + request (google.cloud.dataproc_v1beta2.types.SubmitJobRequest): + The request object. 
A request to submit a job. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (google.cloud.dataproc_v1beta2.types.Job): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.SubmitJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.SubmitJobRequest): + request = jobs.SubmitJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.submit_job] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def submit_job_as_operation(self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Submits job to a cluster. + + Args: + request (google.cloud.dataproc_v1beta2.types.SubmitJobRequest): + The request object. A request to submit a job. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (google.cloud.dataproc_v1beta2.types.Job): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.dataproc_v1beta2.types.Job` A + Dataproc job resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.SubmitJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.SubmitJobRequest): + request = jobs.SubmitJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.submit_job_as_operation] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + jobs.Job, + metadata_type=jobs.JobMetadata, + ) + + # Done; return the response. + return response + + def get_job(self, + request: jobs.GetJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Gets the resource representation for a job in a + project. + + Args: + request (google.cloud.dataproc_v1beta2.types.GetJobRequest): + The request object. A request to get the resource + representation for a job in a project. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (str): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.GetJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.GetJobRequest): + request = jobs.GetJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_job] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_jobs(self, + request: jobs.ListJobsRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListJobsPager: + r"""Lists regions/{region}/jobs in a project. + + Args: + request (google.cloud.dataproc_v1beta2.types.ListJobsRequest): + The request object. A request to list jobs in a project. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (str): + Optional. A filter constraining the jobs to list. + Filters are case-sensitive and have the following + syntax: + + [field = value] AND [field [= value]] ... + + where **field** is ``status.state`` or ``labels.[KEY]``, + and ``[KEY]`` is a label key. **value** can be ``*`` to + match all values. ``status.state`` can be either + ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` + operator is supported; space-separated items are treated + as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.services.job_controller.pagers.ListJobsPager: + A list of jobs in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.ListJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.ListJobsRequest): + request = jobs.ListJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_jobs] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_job(self, + request: jobs.UpdateJobRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Updates a job in a project. + + Args: + request (google.cloud.dataproc_v1beta2.types.UpdateJobRequest): + The request object. A request to update a job. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a jobs.UpdateJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.UpdateJobRequest): + request = jobs.UpdateJobRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_job] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def cancel_job(self, + request: jobs.CancelJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Args: + request (google.cloud.dataproc_v1beta2.types.CancelJobRequest): + The request object. A request to cancel a job. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (str): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.CancelJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.CancelJobRequest): + request = jobs.CancelJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_job] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_job(self, + request: jobs.DeleteJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Args: + request (google.cloud.dataproc_v1beta2.types.DeleteJobRequest): + The request object. A request to delete a job. + project_id (str): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Required. The Dataproc region in + which to handle the request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (str): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.DeleteJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.DeleteJobRequest): + request = jobs.DeleteJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_job] + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "JobControllerClient", +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py new file mode 100644 index 00000000..9f8b6150 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.dataproc_v1beta2.types import jobs + + +class ListJobsPager: + """A pager for iterating through ``list_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListJobs`` requests and continue to iterate + through the ``jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., jobs.ListJobsResponse], + request: jobs.ListJobsRequest, + response: jobs.ListJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1beta2.types.ListJobsRequest): + The initial request object. + response (google.cloud.dataproc_v1beta2.types.ListJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = jobs.ListJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[jobs.ListJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[jobs.Job]: + for page in self.pages: + yield from page.jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListJobsAsyncPager: + """A pager for iterating through ``list_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListJobs`` requests and continue to iterate + through the ``jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[jobs.ListJobsResponse]], + request: jobs.ListJobsRequest, + response: jobs.ListJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1beta2.types.ListJobsRequest): + The initial request object. + response (google.cloud.dataproc_v1beta2.types.ListJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = jobs.ListJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[jobs.ListJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[jobs.Job]: + async def async_generator(): + async for page in self.pages: + for response in page.jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py new file mode 100644 index 00000000..b35119f2 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import JobControllerTransport +from .grpc import JobControllerGrpcTransport +from .grpc_asyncio import JobControllerGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[JobControllerTransport]] +_transport_registry['grpc'] = JobControllerGrpcTransport +_transport_registry['grpc_asyncio'] = JobControllerGrpcAsyncIOTransport + +__all__ = ( + 'JobControllerTransport', + 'JobControllerGrpcTransport', + 'JobControllerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py new file mode 100644 index 00000000..f6b13988 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py @@ -0,0 +1,308 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1beta2.types import jobs +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-dataproc', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class JobControllerTransport(abc.ABC): + """Abstract transport class for JobController.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'dataproc.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.submit_job: gapic_v1.method.wrap_method( + self.submit_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.submit_job_as_operation: gapic_v1.method.wrap_method( + self.submit_job_as_operation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.get_job: gapic_v1.method.wrap_method( + self.get_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.list_jobs: gapic_v1.method.wrap_method( + self.list_jobs, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.update_job: gapic_v1.method.wrap_method( + self.update_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.cancel_job: gapic_v1.method.wrap_method( + self.cancel_job, + default_retry=retries.Retry( 
+initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + self.delete_job: gapic_v1.method.wrap_method( + self.delete_job, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=900.0, + ), + default_timeout=900.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def submit_job(self) -> Callable[ + [jobs.SubmitJobRequest], + Union[ + jobs.Job, + Awaitable[jobs.Job] + ]]: + raise NotImplementedError() + + @property + def submit_job_as_operation(self) -> Callable[ + [jobs.SubmitJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_job(self) -> Callable[ + [jobs.GetJobRequest], + Union[ + jobs.Job, + Awaitable[jobs.Job] + ]]: + raise NotImplementedError() + + @property + def list_jobs(self) -> Callable[ + [jobs.ListJobsRequest], + Union[ + jobs.ListJobsResponse, + Awaitable[jobs.ListJobsResponse] + ]]: + raise NotImplementedError() + + @property + def update_job(self) -> Callable[ + [jobs.UpdateJobRequest], + Union[ + jobs.Job, + Awaitable[jobs.Job] + ]]: + raise NotImplementedError() + + @property + def cancel_job(self) -> Callable[ + [jobs.CancelJobRequest], + Union[ + jobs.Job, + Awaitable[jobs.Job] + ]]: + raise NotImplementedError() + + @property + def delete_job(self) -> Callable[ + [jobs.DeleteJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'JobControllerTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py new file mode 100644 index 00000000..a4a34ea4 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py @@ -0,0 +1,434 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
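Editorial note (not part of the generated patch): a second illustrative sketch showing how the per-method retry and timeout defaults installed by _prep_wrapped_methods above can be overridden on an individual call. The retry predicate mirrors the generated defaults; the numeric values and identifiers are placeholders.

from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.JobControllerClient()

# A caller-supplied Retry replaces the generated default for this call only.
custom_retry = retries.Retry(
    initial=0.1,
    maximum=60.0,
    multiplier=1.3,
    predicate=retries.if_exception_type(
        core_exceptions.DeadlineExceeded,
        core_exceptions.InternalServerError,
        core_exceptions.ServiceUnavailable,
    ),
    deadline=300.0,
)

job = client.get_job(
    project_id="example-project",
    region="us-central1",
    job_id="example-job-id",
    retry=custom_retry,
    timeout=120.0,
)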
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.dataproc_v1beta2.types import jobs +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import JobControllerTransport, DEFAULT_CLIENT_INFO + + +class JobControllerGrpcTransport(JobControllerTransport): + """gRPC backend transport for JobController. + + The JobController provides methods to manage jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def submit_job(self) -> Callable[ + [jobs.SubmitJobRequest], + jobs.Job]: + r"""Return a callable for the submit job method over gRPC. + + Submits a job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'submit_job' not in self._stubs: + self._stubs['submit_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/SubmitJob', + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['submit_job'] + + @property + def submit_job_as_operation(self) -> Callable[ + [jobs.SubmitJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the submit job as operation method over gRPC. + + Submits job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'submit_job_as_operation' not in self._stubs: + self._stubs['submit_job_as_operation'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/SubmitJobAsOperation', + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['submit_job_as_operation'] + + @property + def get_job(self) -> Callable[ + [jobs.GetJobRequest], + jobs.Job]: + r"""Return a callable for the get job method over gRPC. + + Gets the resource representation for a job in a + project. + + Returns: + Callable[[~.GetJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_job' not in self._stubs: + self._stubs['get_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/GetJob', + request_serializer=jobs.GetJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['get_job'] + + @property + def list_jobs(self) -> Callable[ + [jobs.ListJobsRequest], + jobs.ListJobsResponse]: + r"""Return a callable for the list jobs method over gRPC. + + Lists regions/{region}/jobs in a project. + + Returns: + Callable[[~.ListJobsRequest], + ~.ListJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_jobs' not in self._stubs: + self._stubs['list_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/ListJobs', + request_serializer=jobs.ListJobsRequest.serialize, + response_deserializer=jobs.ListJobsResponse.deserialize, + ) + return self._stubs['list_jobs'] + + @property + def update_job(self) -> Callable[ + [jobs.UpdateJobRequest], + jobs.Job]: + r"""Return a callable for the update job method over gRPC. + + Updates a job in a project. + + Returns: + Callable[[~.UpdateJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_job' not in self._stubs: + self._stubs['update_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/UpdateJob', + request_serializer=jobs.UpdateJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['update_job'] + + @property + def cancel_job(self) -> Callable[ + [jobs.CancelJobRequest], + jobs.Job]: + r"""Return a callable for the cancel job method over gRPC. + + Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Returns: + Callable[[~.CancelJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_job' not in self._stubs: + self._stubs['cancel_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/CancelJob', + request_serializer=jobs.CancelJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['cancel_job'] + + @property + def delete_job(self) -> Callable[ + [jobs.DeleteJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete job method over gRPC. + + Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Returns: + Callable[[~.DeleteJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_job' not in self._stubs: + self._stubs['delete_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/DeleteJob', + request_serializer=jobs.DeleteJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_job'] + + +__all__ = ( + 'JobControllerGrpcTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py new file mode 100644 index 00000000..b846035d --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py @@ -0,0 +1,438 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1beta2.types import jobs +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import JobControllerTransport, DEFAULT_CLIENT_INFO +from .grpc import JobControllerGrpcTransport + + +class JobControllerGrpcAsyncIOTransport(JobControllerTransport): + """gRPC AsyncIO backend transport for JobController. + + The JobController provides methods to manage jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def submit_job(self) -> Callable[ + [jobs.SubmitJobRequest], + Awaitable[jobs.Job]]: + r"""Return a callable for the submit job method over gRPC. 
+ + Submits a job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'submit_job' not in self._stubs: + self._stubs['submit_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/SubmitJob', + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['submit_job'] + + @property + def submit_job_as_operation(self) -> Callable[ + [jobs.SubmitJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the submit job as operation method over gRPC. + + Submits job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'submit_job_as_operation' not in self._stubs: + self._stubs['submit_job_as_operation'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/SubmitJobAsOperation', + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['submit_job_as_operation'] + + @property + def get_job(self) -> Callable[ + [jobs.GetJobRequest], + Awaitable[jobs.Job]]: + r"""Return a callable for the get job method over gRPC. + + Gets the resource representation for a job in a + project. + + Returns: + Callable[[~.GetJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_job' not in self._stubs: + self._stubs['get_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/GetJob', + request_serializer=jobs.GetJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['get_job'] + + @property + def list_jobs(self) -> Callable[ + [jobs.ListJobsRequest], + Awaitable[jobs.ListJobsResponse]]: + r"""Return a callable for the list jobs method over gRPC. + + Lists regions/{region}/jobs in a project. + + Returns: + Callable[[~.ListJobsRequest], + Awaitable[~.ListJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_jobs' not in self._stubs: + self._stubs['list_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/ListJobs', + request_serializer=jobs.ListJobsRequest.serialize, + response_deserializer=jobs.ListJobsResponse.deserialize, + ) + return self._stubs['list_jobs'] + + @property + def update_job(self) -> Callable[ + [jobs.UpdateJobRequest], + Awaitable[jobs.Job]]: + r"""Return a callable for the update job method over gRPC. + + Updates a job in a project. 
+ + Returns: + Callable[[~.UpdateJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_job' not in self._stubs: + self._stubs['update_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/UpdateJob', + request_serializer=jobs.UpdateJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['update_job'] + + @property + def cancel_job(self) -> Callable[ + [jobs.CancelJobRequest], + Awaitable[jobs.Job]]: + r"""Return a callable for the cancel job method over gRPC. + + Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Returns: + Callable[[~.CancelJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_job' not in self._stubs: + self._stubs['cancel_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/CancelJob', + request_serializer=jobs.CancelJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs['cancel_job'] + + @property + def delete_job(self) -> Callable[ + [jobs.DeleteJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete job method over gRPC. + + Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Returns: + Callable[[~.DeleteJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_job' not in self._stubs: + self._stubs['delete_job'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.JobController/DeleteJob', + request_serializer=jobs.DeleteJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_job'] + + +__all__ = ( + 'JobControllerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py new file mode 100644 index 00000000..1dd621e9 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
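As a hedged illustration of the stub properties above: each one lazily builds and caches a unary-unary callable, so a request message can be awaited against it directly. The project, region, and job IDs are placeholders; in normal use the JobControllerAsyncClient added elsewhere in this patch wraps these callables with retries and routing metadata.

from google.cloud.dataproc_v1beta2.types import jobs

async def fetch_job_state(transport):
    request = jobs.GetJobRequest(
        project_id="my-project",   # placeholder
        region="us-central1",      # placeholder
        job_id="job-1234",         # placeholder
    )
    # transport.get_job returns the cached stub created in the property above.
    job = await transport.get_job(request)
    return job.status.state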
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import WorkflowTemplateServiceClient +from .async_client import WorkflowTemplateServiceAsyncClient + +__all__ = ( + 'WorkflowTemplateServiceClient', + 'WorkflowTemplateServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py new file mode 100644 index 00000000..066c7077 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py @@ -0,0 +1,943 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1beta2.services.workflow_template_service import pagers +from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport +from .client import WorkflowTemplateServiceClient + + +class WorkflowTemplateServiceAsyncClient: + """The API interface for managing Workflow Templates in the + Dataproc API. 
+ """ + + _client: WorkflowTemplateServiceClient + + DEFAULT_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_MTLS_ENDPOINT + + cluster_path = staticmethod(WorkflowTemplateServiceClient.cluster_path) + parse_cluster_path = staticmethod(WorkflowTemplateServiceClient.parse_cluster_path) + workflow_template_path = staticmethod(WorkflowTemplateServiceClient.workflow_template_path) + parse_workflow_template_path = staticmethod(WorkflowTemplateServiceClient.parse_workflow_template_path) + common_billing_account_path = staticmethod(WorkflowTemplateServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(WorkflowTemplateServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(WorkflowTemplateServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(WorkflowTemplateServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(WorkflowTemplateServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(WorkflowTemplateServiceClient.parse_common_organization_path) + common_project_path = staticmethod(WorkflowTemplateServiceClient.common_project_path) + parse_common_project_path = staticmethod(WorkflowTemplateServiceClient.parse_common_project_path) + common_location_path = staticmethod(WorkflowTemplateServiceClient.common_location_path) + parse_common_location_path = staticmethod(WorkflowTemplateServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceAsyncClient: The constructed client. + """ + return WorkflowTemplateServiceClient.from_service_account_info.__func__(WorkflowTemplateServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceAsyncClient: The constructed client. + """ + return WorkflowTemplateServiceClient.from_service_account_file.__func__(WorkflowTemplateServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> WorkflowTemplateServiceTransport: + """Returns the transport used by the client instance. + + Returns: + WorkflowTemplateServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(WorkflowTemplateServiceClient).get_transport_class, type(WorkflowTemplateServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, WorkflowTemplateServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the workflow template service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.WorkflowTemplateServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = WorkflowTemplateServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_workflow_template(self, + request: workflow_templates.CreateWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Creates new workflow template. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.CreateWorkflowTemplateRequest`): + The request object. A request to create a workflow + template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`google.cloud.dataproc_v1beta2.types.WorkflowTemplate`): + Required. The Dataproc workflow + template to create. + + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: + A Dataproc workflow template + resource. 
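A sketch of the constructor options described above: an explicit api_endpoint passed through client_options takes precedence over the mTLS environment variables. The regional hostname follows the documented Dataproc pattern but is an assumption here; credentials come from the environment.

from google.api_core.client_options import ClientOptions
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.WorkflowTemplateServiceAsyncClient(
    client_options=ClientOptions(
        api_endpoint="us-central1-dataproc.googleapis.com:443",  # assumed regional endpoint
    ),
)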
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.CreateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_workflow_template(self, + request: workflow_templates.GetWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.GetWorkflowTemplateRequest`): + The request object. A request to fetch a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.GetWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def instantiate_workflow_template(self, + request: workflow_templates.InstantiateWorkflowTemplateRequest = None, + *, + name: str = None, + parameters: Sequence[workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest`): + The request object. A request to instantiate a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`Sequence[google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest.ParametersEntry]`): + Optional. Map from parameter names to + values that should be used for those + parameters. 
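A sketch of get_workflow_template using the flattened name argument; workflow_template_path (re-exported on the async client above) builds the same resource string. Identifiers are placeholders, and passing a request object with an explicit version is the alternative form.

from google.cloud import dataproc_v1beta2

async def get_template(client):
    name = dataproc_v1beta2.WorkflowTemplateServiceAsyncClient.workflow_template_path(
        "my-project", "us-central1", "sketch-template",   # placeholders
    )
    template = await client.get_workflow_template(name=name)
    return template.version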
Values may not exceed 100 + characters. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, parameters]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.InstantiateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + if parameters: + request.parameters.update(parameters) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.instantiate_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + async def instantiate_inline_workflow_template(self, + request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. 
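A sketch of driving instantiate_workflow_template: the coroutine resolves to an AsyncOperation whose result is Empty and whose metadata is WorkflowMetadata, as documented above. The template name and the parameter key/value are placeholders.

async def run_template(client, name):
    # The return value is an operation_async.AsyncOperation; result() resolves
    # to google.protobuf.Empty once the whole workflow has finished.
    operation = await client.instantiate_workflow_template(
        name=name,
        parameters={"CLUSTER_NAME": "sketch-cluster"},   # placeholder parameter
    )
    await operation.result()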
+ + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.InstantiateInlineWorkflowTemplateRequest`): + The request object. A request to instantiate an inline + workflow template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`google.cloud.dataproc_v1beta2.types.WorkflowTemplate`): + Required. The workflow template to + instantiate. + + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.instantiate_inline_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + async def update_workflow_template(self, + request: workflow_templates.UpdateWorkflowTemplateRequest = None, + *, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.UpdateWorkflowTemplateRequest`): + The request object. A request to update a workflow + template. + template (:class:`google.cloud.dataproc_v1beta2.types.WorkflowTemplate`): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([template]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.UpdateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("template.name", request.template.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_workflow_templates(self, + request: workflow_templates.ListWorkflowTemplatesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListWorkflowTemplatesAsyncPager: + r"""Lists workflows that match the specified filter in + the request. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest`): + The request object. A request to list workflow templates + in a project. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.services.workflow_template_service.pagers.ListWorkflowTemplatesAsyncPager: + A response to a request to list + workflow templates in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.ListWorkflowTemplatesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_workflow_templates, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
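A sketch of the update_workflow_template call shown above: the fetched template is mutated and sent back, and the routing header is derived from template.name as in the metadata tuple above. The label key/value is a placeholder.

async def relabel_template(client, name):
    template = await client.get_workflow_template(name=name)
    template.labels["owner"] = "data-eng"   # placeholder label
    # template.version must still match the server's current version.
    return await client.update_workflow_template(template=template)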
+ response = pagers.ListWorkflowTemplatesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_workflow_template(self, + request: workflow_templates.DeleteWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a workflow template. It does not cancel in- + rogress workflows. + + Args: + request (:class:`google.cloud.dataproc_v1beta2.types.DeleteWorkflowTemplateRequest`): + The request object. A request to delete a workflow + template. + Currently started workflows will remain running. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.delete``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = workflow_templates.DeleteWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
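A sketch of consuming the ListWorkflowTemplatesAsyncPager constructed above: async iteration resolves additional pages transparently. The parent string is a placeholder.

async def list_templates(client):
    pager = await client.list_workflow_templates(
        parent="projects/my-project/regions/us-central1",   # placeholder
    )
    async for template in pager:   # __aiter__ fetches further pages on demand
        print(template.id, template.version)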
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "WorkflowTemplateServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py new file mode 100644 index 00000000..fd524a3d --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py @@ -0,0 +1,1092 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1beta2.services.workflow_template_service import pagers +from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import WorkflowTemplateServiceGrpcTransport +from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport + + +class WorkflowTemplateServiceClientMeta(type): + """Metaclass for the WorkflowTemplateService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[WorkflowTemplateServiceTransport]] + _transport_registry["grpc"] = WorkflowTemplateServiceGrpcTransport + _transport_registry["grpc_asyncio"] = WorkflowTemplateServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[WorkflowTemplateServiceTransport]: + """Returns an appropriate transport class. 
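A sketch of delete_workflow_template, which returns None and leaves already-running workflows untouched, as noted above. The resource name is a placeholder.

import asyncio
from google.cloud import dataproc_v1beta2

async def remove_template():
    client = dataproc_v1beta2.WorkflowTemplateServiceAsyncClient()
    name = ("projects/my-project/regions/us-central1/"
            "workflowTemplates/sketch-template")   # placeholder
    await client.delete_workflow_template(name=name)   # no return value

asyncio.run(remove_template())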
+ + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class WorkflowTemplateServiceClient(metaclass=WorkflowTemplateServiceClientMeta): + """The API interface for managing Workflow Templates in the + Dataproc API. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> WorkflowTemplateServiceTransport: + """Returns the transport used by the client instance. + + Returns: + WorkflowTemplateServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def cluster_path(project: str,location: str,cluster: str,) -> str: + """Returns a fully-qualified cluster string.""" + return "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) + + @staticmethod + def parse_cluster_path(path: str) -> Dict[str,str]: + """Parses a cluster path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/clusters/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def workflow_template_path(project: str,region: str,workflow_template: str,) -> str: + """Returns a fully-qualified workflow_template string.""" + return "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format(project=project, region=region, workflow_template=workflow_template, ) + + @staticmethod + def parse_workflow_template_path(path: str) -> Dict[str,str]: + """Parses a workflow_template path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/regions/(?P.+?)/workflowTemplates/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, WorkflowTemplateServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: 
gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the workflow template service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, WorkflowTemplateServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. 
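A sketch of the endpoint selection logic above: when client_options carries no api_endpoint, GOOGLE_API_USE_MTLS_ENDPOINT decides between DEFAULT_ENDPOINT and DEFAULT_MTLS_ENDPOINT. The three accepted values come from the code above; forcing "never" here is purely illustrative.

import os
from google.api_core.client_options import ClientOptions
from google.cloud import dataproc_v1beta2

# "always" -> dataproc.mtls.googleapis.com, "never" -> dataproc.googleapis.com,
# "auto" (the default) -> mTLS only when a client certificate is configured.
os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"

client = dataproc_v1beta2.WorkflowTemplateServiceClient(
    client_options=ClientOptions(),   # no api_endpoint, so the env var decides
)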
+ if isinstance(transport, WorkflowTemplateServiceTransport): + # transport is a WorkflowTemplateServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_workflow_template(self, + request: workflow_templates.CreateWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Creates new workflow template. + + Args: + request (google.cloud.dataproc_v1beta2.types.CreateWorkflowTemplateRequest): + The request object. A request to create a workflow + template. + parent (str): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): + Required. The Dataproc workflow + template to create. + + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.CreateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.CreateWorkflowTemplateRequest): + request = workflow_templates.CreateWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_workflow_template(self, + request: workflow_templates.GetWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Args: + request (google.cloud.dataproc_v1beta2.types.GetWorkflowTemplateRequest): + The request object. A request to fetch a workflow + template. + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.GetWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.GetWorkflowTemplateRequest): + request = workflow_templates.GetWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. 
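+        # (Editorial note) ``to_grpc_metadata`` produces an ``x-goog-request-params``
+        # metadata entry (e.g. ``name=projects/{project}/regions/{region}/workflowTemplates/{id}``)
+        # so the service can route the call to the correct regional backend.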
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def instantiate_workflow_template(self, + request: workflow_templates.InstantiateWorkflowTemplateRequest = None, + *, + name: str = None, + parameters: Sequence[workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest): + The request object. A request to instantiate a workflow + template. + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (Sequence[google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): + Optional. Map from parameter names to + values that should be used for those + parameters. Values may not exceed 100 + characters. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
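+        # (Editorial usage note, illustrative only) the ``operation.Operation``
+        # returned below can be blocked on with ``op.result(timeout=...)``, which
+        # yields ``empty_pb2.Empty`` once the workflow finishes, while ``op.metadata``
+        # exposes ``WorkflowMetadata`` describing its progress.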
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, parameters]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.InstantiateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.InstantiateWorkflowTemplateRequest): + request = workflow_templates.InstantiateWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if parameters is not None: + request.parameters = parameters + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.instantiate_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + def instantiate_inline_workflow_template(self, + request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (google.cloud.dataproc_v1beta2.types.InstantiateInlineWorkflowTemplateRequest): + The request object. A request to instantiate an inline + workflow template. + parent (str): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): + Required. The workflow template to + instantiate. + + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.InstantiateInlineWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.InstantiateInlineWorkflowTemplateRequest): + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.instantiate_inline_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. 
+ return response + + def update_workflow_template(self, + request: workflow_templates.UpdateWorkflowTemplateRequest = None, + *, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Args: + request (google.cloud.dataproc_v1beta2.types.UpdateWorkflowTemplateRequest): + The request object. A request to update a workflow + template. + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([template]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.UpdateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.UpdateWorkflowTemplateRequest): + request = workflow_templates.UpdateWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("template.name", request.template.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_workflow_templates(self, + request: workflow_templates.ListWorkflowTemplatesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListWorkflowTemplatesPager: + r"""Lists workflows that match the specified filter in + the request. + + Args: + request (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest): + The request object. A request to list workflow templates + in a project. + parent (str): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1beta2.services.workflow_template_service.pagers.ListWorkflowTemplatesPager: + A response to a request to list + workflow templates in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.ListWorkflowTemplatesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.ListWorkflowTemplatesRequest): + request = workflow_templates.ListWorkflowTemplatesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_workflow_templates] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListWorkflowTemplatesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_workflow_template(self, + request: workflow_templates.DeleteWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a workflow template. It does not cancel in- + rogress workflows. + + Args: + request (google.cloud.dataproc_v1beta2.types.DeleteWorkflowTemplateRequest): + The request object. A request to delete a workflow + template. + Currently started workflows will remain running. + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.workflowTemplates.delete``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.DeleteWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.DeleteWorkflowTemplateRequest): + request = workflow_templates.DeleteWorkflowTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-dataproc", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "WorkflowTemplateServiceClient", +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py new file mode 100644 index 00000000..8922f151 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.dataproc_v1beta2.types import workflow_templates + + +class ListWorkflowTemplatesPager: + """A pager for iterating through ``list_workflow_templates`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``templates`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListWorkflowTemplates`` requests and continue to iterate + through the ``templates`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., workflow_templates.ListWorkflowTemplatesResponse], + request: workflow_templates.ListWorkflowTemplatesRequest, + response: workflow_templates.ListWorkflowTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest): + The initial request object. + response (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = workflow_templates.ListWorkflowTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[workflow_templates.ListWorkflowTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[workflow_templates.WorkflowTemplate]: + for page in self.pages: + yield from page.templates + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListWorkflowTemplatesAsyncPager: + """A pager for iterating through ``list_workflow_templates`` requests. + + This class thinly wraps an initial + :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``templates`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListWorkflowTemplates`` requests and continue to iterate + through the ``templates`` field on the + corresponding responses. + + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[workflow_templates.ListWorkflowTemplatesResponse]], + request: workflow_templates.ListWorkflowTemplatesRequest, + response: workflow_templates.ListWorkflowTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest): + The initial request object. + response (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = workflow_templates.ListWorkflowTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[workflow_templates.ListWorkflowTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[workflow_templates.WorkflowTemplate]: + async def async_generator(): + async for page in self.pages: + for response in page.templates: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py new file mode 100644 index 00000000..96efd4cb --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import WorkflowTemplateServiceTransport +from .grpc import WorkflowTemplateServiceGrpcTransport +from .grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
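+# (Editorial note) The registry maps the transport names accepted by the client's
+# ``transport=`` argument ('grpc' and 'grpc_asyncio') to their implementing classes;
+# e.g. (illustrative) passing ``transport="grpc"`` ultimately selects
+# ``WorkflowTemplateServiceGrpcTransport``.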
+_transport_registry = OrderedDict() # type: Dict[str, Type[WorkflowTemplateServiceTransport]] +_transport_registry['grpc'] = WorkflowTemplateServiceGrpcTransport +_transport_registry['grpc_asyncio'] = WorkflowTemplateServiceGrpcAsyncIOTransport + +__all__ = ( + 'WorkflowTemplateServiceTransport', + 'WorkflowTemplateServiceGrpcTransport', + 'WorkflowTemplateServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py new file mode 100644 index 00000000..b8a83efd --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py @@ -0,0 +1,306 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-dataproc', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class WorkflowTemplateServiceTransport(abc.ABC): + """Abstract transport class for WorkflowTemplateService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'dataproc.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
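+        # (Editorial note) Every method below shares the same retry shape: exponential
+        # backoff starting at 0.1s, capped at 60s, growing 1.3x per attempt, with a
+        # 600s overall deadline and a 600s default timeout. Mutating calls retry only
+        # on ServiceUnavailable; the read calls (get/list) also retry on
+        # DeadlineExceeded and InternalServerError.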
+ self._wrapped_methods = { + self.create_workflow_template: gapic_v1.method.wrap_method( + self.create_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.get_workflow_template: gapic_v1.method.wrap_method( + self.get_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.instantiate_workflow_template: gapic_v1.method.wrap_method( + self.instantiate_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.instantiate_inline_workflow_template: gapic_v1.method.wrap_method( + self.instantiate_inline_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.update_workflow_template: gapic_v1.method.wrap_method( + self.update_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.list_workflow_templates: gapic_v1.method.wrap_method( + self.list_workflow_templates, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.InternalServerError, + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + self.delete_workflow_template: gapic_v1.method.wrap_method( + self.delete_workflow_template, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=600.0, + ), + default_timeout=600.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_workflow_template(self) -> Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + Union[ + workflow_templates.WorkflowTemplate, + Awaitable[workflow_templates.WorkflowTemplate] + ]]: + raise NotImplementedError() + + @property + def get_workflow_template(self) -> Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + Union[ + workflow_templates.WorkflowTemplate, + Awaitable[workflow_templates.WorkflowTemplate] + ]]: + raise NotImplementedError() + + @property + def instantiate_workflow_template(self) -> Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def instantiate_inline_workflow_template(self) -> Callable[ + 
[workflow_templates.InstantiateInlineWorkflowTemplateRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def update_workflow_template(self) -> Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + Union[ + workflow_templates.WorkflowTemplate, + Awaitable[workflow_templates.WorkflowTemplate] + ]]: + raise NotImplementedError() + + @property + def list_workflow_templates(self) -> Callable[ + [workflow_templates.ListWorkflowTemplatesRequest], + Union[ + workflow_templates.ListWorkflowTemplatesResponse, + Awaitable[workflow_templates.ListWorkflowTemplatesResponse] + ]]: + raise NotImplementedError() + + @property + def delete_workflow_template(self) -> Callable[ + [workflow_templates.DeleteWorkflowTemplateRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'WorkflowTemplateServiceTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py new file mode 100644 index 00000000..9dc86cd7 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py @@ -0,0 +1,481 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO + + +class WorkflowTemplateServiceGrpcTransport(WorkflowTemplateServiceTransport): + """gRPC backend transport for WorkflowTemplateService. + + The API interface for managing Workflow Templates in the + Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
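+
+    (Editorial sketch, assuming application default credentials are configured)
+    a minimal way to build this transport directly and hand it to the client::
+
+        transport = WorkflowTemplateServiceGrpcTransport()
+        client = WorkflowTemplateServiceClient(transport=transport)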
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. 
+ credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. 
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_workflow_template(self) -> Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate]: + r"""Return a callable for the create workflow template method over gRPC. + + Creates new workflow template. + + Returns: + Callable[[~.CreateWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_workflow_template' not in self._stubs: + self._stubs['create_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate', + request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs['create_workflow_template'] + + @property + def get_workflow_template(self) -> Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate]: + r"""Return a callable for the get workflow template method over gRPC. + + Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Returns: + Callable[[~.GetWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_workflow_template' not in self._stubs: + self._stubs['get_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate', + request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs['get_workflow_template'] + + @property + def instantiate_workflow_template(self) -> Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], + operations_pb2.Operation]: + r"""Return a callable for the instantiate workflow template method over gRPC. + + Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. 
+ + Returns: + Callable[[~.InstantiateWorkflowTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'instantiate_workflow_template' not in self._stubs: + self._stubs['instantiate_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate', + request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['instantiate_workflow_template'] + + @property + def instantiate_inline_workflow_template(self) -> Callable[ + [workflow_templates.InstantiateInlineWorkflowTemplateRequest], + operations_pb2.Operation]: + r"""Return a callable for the instantiate inline workflow + template method over gRPC. + + Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateInlineWorkflowTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'instantiate_inline_workflow_template' not in self._stubs: + self._stubs['instantiate_inline_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateInlineWorkflowTemplate', + request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['instantiate_inline_workflow_template'] + + @property + def update_workflow_template(self) -> Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate]: + r"""Return a callable for the update workflow template method over gRPC. + + Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Returns: + Callable[[~.UpdateWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_workflow_template' not in self._stubs: + self._stubs['update_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate', + request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs['update_workflow_template'] + + @property + def list_workflow_templates(self) -> Callable[ + [workflow_templates.ListWorkflowTemplatesRequest], + workflow_templates.ListWorkflowTemplatesResponse]: + r"""Return a callable for the list workflow templates method over gRPC. + + Lists workflows that match the specified filter in + the request. + + Returns: + Callable[[~.ListWorkflowTemplatesRequest], + ~.ListWorkflowTemplatesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_workflow_templates' not in self._stubs: + self._stubs['list_workflow_templates'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates', + request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize, + response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize, + ) + return self._stubs['list_workflow_templates'] + + @property + def delete_workflow_template(self) -> Callable[ + [workflow_templates.DeleteWorkflowTemplateRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete workflow template method over gRPC. + + Deletes a workflow template. It does not cancel in- + rogress workflows. + + Returns: + Callable[[~.DeleteWorkflowTemplateRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_workflow_template' not in self._stubs: + self._stubs['delete_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate', + request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_workflow_template'] + + +__all__ = ( + 'WorkflowTemplateServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..3894c9b6 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py @@ -0,0 +1,485 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import WorkflowTemplateServiceGrpcTransport + + +class WorkflowTemplateServiceGrpcAsyncIOTransport(WorkflowTemplateServiceTransport): + """gRPC AsyncIO backend transport for WorkflowTemplateService. + + The API interface for managing Workflow Templates in the + Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'dataproc.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_workflow_template(self) -> Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate]]: + r"""Return a callable for the create workflow template method over gRPC. + + Creates new workflow template. + + Returns: + Callable[[~.CreateWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_workflow_template' not in self._stubs: + self._stubs['create_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate', + request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs['create_workflow_template'] + + @property + def get_workflow_template(self) -> Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate]]: + r"""Return a callable for the get workflow template method over gRPC. + + Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Returns: + Callable[[~.GetWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_workflow_template' not in self._stubs: + self._stubs['get_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate', + request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs['get_workflow_template'] + + @property + def instantiate_workflow_template(self) -> Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the instantiate workflow template method over gRPC. + + Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateWorkflowTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'instantiate_workflow_template' not in self._stubs: + self._stubs['instantiate_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate', + request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['instantiate_workflow_template'] + + @property + def instantiate_inline_workflow_template(self) -> Callable[ + [workflow_templates.InstantiateInlineWorkflowTemplateRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the instantiate inline workflow + template method over gRPC. + + Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateInlineWorkflowTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'instantiate_inline_workflow_template' not in self._stubs: + self._stubs['instantiate_inline_workflow_template'] = self.grpc_channel.unary_unary( + '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateInlineWorkflowTemplate', + request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['instantiate_inline_workflow_template'] + + @property + def update_workflow_template(self) -> Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate]]: + r"""Return a callable for the update workflow template method over gRPC. + + Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Returns: + Callable[[~.UpdateWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'update_workflow_template' not in self._stubs:
+            self._stubs['update_workflow_template'] = self.grpc_channel.unary_unary(
+                '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate',
+                request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize,
+                response_deserializer=workflow_templates.WorkflowTemplate.deserialize,
+            )
+        return self._stubs['update_workflow_template']
+
+    @property
+    def list_workflow_templates(self) -> Callable[
+            [workflow_templates.ListWorkflowTemplatesRequest],
+            Awaitable[workflow_templates.ListWorkflowTemplatesResponse]]:
+        r"""Return a callable for the list workflow templates method over gRPC.
+
+        Lists workflows that match the specified filter in
+        the request.
+
+        Returns:
+            Callable[[~.ListWorkflowTemplatesRequest],
+                    Awaitable[~.ListWorkflowTemplatesResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'list_workflow_templates' not in self._stubs:
+            self._stubs['list_workflow_templates'] = self.grpc_channel.unary_unary(
+                '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates',
+                request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize,
+                response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize,
+            )
+        return self._stubs['list_workflow_templates']
+
+    @property
+    def delete_workflow_template(self) -> Callable[
+            [workflow_templates.DeleteWorkflowTemplateRequest],
+            Awaitable[empty_pb2.Empty]]:
+        r"""Return a callable for the delete workflow template method over gRPC.
+
+        Deletes a workflow template. It does not cancel
+        in-progress workflows.
+
+        Returns:
+            Callable[[~.DeleteWorkflowTemplateRequest],
+                    Awaitable[~.Empty]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_workflow_template' not in self._stubs:
+            self._stubs['delete_workflow_template'] = self.grpc_channel.unary_unary(
+                '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate',
+                request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['delete_workflow_template']
+
+
+__all__ = (
+    'WorkflowTemplateServiceGrpcAsyncIOTransport',
+)
diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/__init__.py
new file mode 100644
index 00000000..de792217
--- /dev/null
+++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/__init__.py
@@ -0,0 +1,197 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .autoscaling_policies import ( + AutoscalingPolicy, + BasicAutoscalingAlgorithm, + BasicYarnAutoscalingConfig, + CreateAutoscalingPolicyRequest, + DeleteAutoscalingPolicyRequest, + GetAutoscalingPolicyRequest, + InstanceGroupAutoscalingPolicyConfig, + ListAutoscalingPoliciesRequest, + ListAutoscalingPoliciesResponse, + UpdateAutoscalingPolicyRequest, +) +from .clusters import ( + AcceleratorConfig, + AutoscalingConfig, + Cluster, + ClusterConfig, + ClusterMetrics, + ClusterStatus, + CreateClusterRequest, + DeleteClusterRequest, + DiagnoseClusterRequest, + DiagnoseClusterResults, + DiskConfig, + EncryptionConfig, + EndpointConfig, + GceClusterConfig, + GetClusterRequest, + GkeClusterConfig, + InstanceGroupConfig, + KerberosConfig, + LifecycleConfig, + ListClustersRequest, + ListClustersResponse, + ManagedGroupConfig, + NodeInitializationAction, + ReservationAffinity, + SecurityConfig, + SoftwareConfig, + UpdateClusterRequest, +) +from .jobs import ( + CancelJobRequest, + DeleteJobRequest, + GetJobRequest, + HadoopJob, + HiveJob, + Job, + JobMetadata, + JobPlacement, + JobReference, + JobScheduling, + JobStatus, + ListJobsRequest, + ListJobsResponse, + LoggingConfig, + PigJob, + PrestoJob, + PySparkJob, + QueryList, + SparkJob, + SparkRJob, + SparkSqlJob, + SubmitJobRequest, + UpdateJobRequest, + YarnApplication, +) +from .operations import ( + ClusterOperationMetadata, + ClusterOperationStatus, +) +from .workflow_templates import ( + ClusterOperation, + ClusterSelector, + CreateWorkflowTemplateRequest, + DeleteWorkflowTemplateRequest, + GetWorkflowTemplateRequest, + InstantiateInlineWorkflowTemplateRequest, + InstantiateWorkflowTemplateRequest, + ListWorkflowTemplatesRequest, + ListWorkflowTemplatesResponse, + ManagedCluster, + OrderedJob, + ParameterValidation, + RegexValidation, + TemplateParameter, + UpdateWorkflowTemplateRequest, + ValueValidation, + WorkflowGraph, + WorkflowMetadata, + WorkflowNode, + WorkflowTemplate, + WorkflowTemplatePlacement, +) + +__all__ = ( + 'AutoscalingPolicy', + 'BasicAutoscalingAlgorithm', + 'BasicYarnAutoscalingConfig', + 'CreateAutoscalingPolicyRequest', + 'DeleteAutoscalingPolicyRequest', + 'GetAutoscalingPolicyRequest', + 'InstanceGroupAutoscalingPolicyConfig', + 'ListAutoscalingPoliciesRequest', + 'ListAutoscalingPoliciesResponse', + 'UpdateAutoscalingPolicyRequest', + 'AcceleratorConfig', + 'AutoscalingConfig', + 'Cluster', + 'ClusterConfig', + 'ClusterMetrics', + 'ClusterStatus', + 'CreateClusterRequest', + 'DeleteClusterRequest', + 'DiagnoseClusterRequest', + 'DiagnoseClusterResults', + 'DiskConfig', + 'EncryptionConfig', + 'EndpointConfig', + 'GceClusterConfig', + 'GetClusterRequest', + 'GkeClusterConfig', + 'InstanceGroupConfig', + 'KerberosConfig', + 'LifecycleConfig', + 'ListClustersRequest', + 'ListClustersResponse', + 'ManagedGroupConfig', + 'NodeInitializationAction', + 'ReservationAffinity', + 'SecurityConfig', + 'SoftwareConfig', + 'UpdateClusterRequest', + 'CancelJobRequest', + 'DeleteJobRequest', + 'GetJobRequest', + 'HadoopJob', + 'HiveJob', + 'Job', + 'JobMetadata', + 'JobPlacement', + 'JobReference', + 'JobScheduling', + 'JobStatus', + 'ListJobsRequest', + 'ListJobsResponse', + 'LoggingConfig', + 'PigJob', + 'PrestoJob', + 'PySparkJob', + 'QueryList', + 'SparkJob', + 'SparkRJob', + 'SparkSqlJob', + 'SubmitJobRequest', + 'UpdateJobRequest', + 'YarnApplication', + 'ClusterOperationMetadata', + 'ClusterOperationStatus', + 
'Component', + 'ClusterOperation', + 'ClusterSelector', + 'CreateWorkflowTemplateRequest', + 'DeleteWorkflowTemplateRequest', + 'GetWorkflowTemplateRequest', + 'InstantiateInlineWorkflowTemplateRequest', + 'InstantiateWorkflowTemplateRequest', + 'ListWorkflowTemplatesRequest', + 'ListWorkflowTemplatesResponse', + 'ManagedCluster', + 'OrderedJob', + 'ParameterValidation', + 'RegexValidation', + 'TemplateParameter', + 'UpdateWorkflowTemplateRequest', + 'ValueValidation', + 'WorkflowGraph', + 'WorkflowMetadata', + 'WorkflowNode', + 'WorkflowTemplate', + 'WorkflowTemplatePlacement', +) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py new file mode 100644 index 00000000..7e7fd933 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py @@ -0,0 +1,416 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1beta2', + manifest={ + 'AutoscalingPolicy', + 'BasicAutoscalingAlgorithm', + 'BasicYarnAutoscalingConfig', + 'InstanceGroupAutoscalingPolicyConfig', + 'CreateAutoscalingPolicyRequest', + 'GetAutoscalingPolicyRequest', + 'UpdateAutoscalingPolicyRequest', + 'DeleteAutoscalingPolicyRequest', + 'ListAutoscalingPoliciesRequest', + 'ListAutoscalingPoliciesResponse', + }, +) + + +class AutoscalingPolicy(proto.Message): + r"""Describes an autoscaling policy for Dataproc cluster + autoscaler. + + Attributes: + id (str): + Required. The policy id. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of between 3 and 50 + characters. + name (str): + Output only. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies``, the + resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + basic_algorithm (google.cloud.dataproc_v1beta2.types.BasicAutoscalingAlgorithm): + + worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupAutoscalingPolicyConfig): + Required. Describes how the autoscaler will + operate for primary workers. + secondary_worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupAutoscalingPolicyConfig): + Optional. Describes how the autoscaler will + operate for secondary workers. 
+ """ + + id = proto.Field( + proto.STRING, + number=1, + ) + name = proto.Field( + proto.STRING, + number=2, + ) + basic_algorithm = proto.Field( + proto.MESSAGE, + number=3, + oneof='algorithm', + message='BasicAutoscalingAlgorithm', + ) + worker_config = proto.Field( + proto.MESSAGE, + number=4, + message='InstanceGroupAutoscalingPolicyConfig', + ) + secondary_worker_config = proto.Field( + proto.MESSAGE, + number=5, + message='InstanceGroupAutoscalingPolicyConfig', + ) + + +class BasicAutoscalingAlgorithm(proto.Message): + r"""Basic algorithm for autoscaling. + Attributes: + yarn_config (google.cloud.dataproc_v1beta2.types.BasicYarnAutoscalingConfig): + Required. YARN autoscaling configuration. + cooldown_period (google.protobuf.duration_pb2.Duration): + Optional. Duration between scaling events. A scaling period + starts after the update operation from the previous event + has completed. + + Bounds: [2m, 1d]. Default: 2m. + """ + + yarn_config = proto.Field( + proto.MESSAGE, + number=1, + message='BasicYarnAutoscalingConfig', + ) + cooldown_period = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + + +class BasicYarnAutoscalingConfig(proto.Message): + r"""Basic autoscaling configurations for YARN. + Attributes: + graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): + Required. Timeout for YARN graceful decommissioning of Node + Managers. Specifies the duration to wait for jobs to + complete before forcefully removing workers (and potentially + interrupting jobs). Only applicable to downscaling + operations. + + Bounds: [0s, 1d]. + scale_up_factor (float): + Required. Fraction of average YARN pending memory in the + last cooldown period for which to add workers. A scale-up + factor of 1.0 will result in scaling up so that there is no + pending memory remaining after the update (more aggressive + scaling). A scale-up factor closer to 0 will result in a + smaller magnitude of scaling up (less aggressive scaling). + See `How autoscaling + works `__ + for more information. + + Bounds: [0.0, 1.0]. + scale_down_factor (float): + Required. Fraction of average YARN pending memory in the + last cooldown period for which to remove workers. A + scale-down factor of 1 will result in scaling down so that + there is no available memory remaining after the update + (more aggressive scaling). A scale-down factor of 0 disables + removing workers, which can be beneficial for autoscaling a + single job. See `How autoscaling + works `__ + for more information. + + Bounds: [0.0, 1.0]. + scale_up_min_worker_fraction (float): + Optional. Minimum scale-up threshold as a fraction of total + cluster size before scaling occurs. For example, in a + 20-worker cluster, a threshold of 0.1 means the autoscaler + must recommend at least a 2-worker scale-up for the cluster + to scale. A threshold of 0 means the autoscaler will scale + up on any recommended change. + + Bounds: [0.0, 1.0]. Default: 0.0. + scale_down_min_worker_fraction (float): + Optional. Minimum scale-down threshold as a fraction of + total cluster size before scaling occurs. For example, in a + 20-worker cluster, a threshold of 0.1 means the autoscaler + must recommend at least a 2 worker scale-down for the + cluster to scale. A threshold of 0 means the autoscaler will + scale down on any recommended change. + + Bounds: [0.0, 1.0]. Default: 0.0. 
+ """ + + graceful_decommission_timeout = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + scale_up_factor = proto.Field( + proto.DOUBLE, + number=1, + ) + scale_down_factor = proto.Field( + proto.DOUBLE, + number=2, + ) + scale_up_min_worker_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + scale_down_min_worker_fraction = proto.Field( + proto.DOUBLE, + number=4, + ) + + +class InstanceGroupAutoscalingPolicyConfig(proto.Message): + r"""Configuration for the size bounds of an instance group, + including its proportional size to other groups. + + Attributes: + min_instances (int): + Optional. Minimum number of instances for this group. + + Primary workers - Bounds: [2, max_instances]. Default: 2. + Secondary workers - Bounds: [0, max_instances]. Default: 0. + max_instances (int): + Optional. Maximum number of instances for this group. + Required for primary workers. Note that by default, clusters + will not use secondary workers. Required for secondary + workers if the minimum secondary instances is set. + + Primary workers - Bounds: [min_instances, ). Required. + Secondary workers - Bounds: [min_instances, ). Default: 0. + weight (int): + Optional. Weight for the instance group, which is used to + determine the fraction of total workers in the cluster from + this instance group. For example, if primary workers have + weight 2, and secondary workers have weight 1, the cluster + will have approximately 2 primary workers for each secondary + worker. + + The cluster may not reach the specified balance if + constrained by min/max bounds or other autoscaling settings. + For example, if ``max_instances`` for secondary workers is + 0, then only primary workers will be added. The cluster can + also be out of balance when created. + + If weight is not set on any instance group, the cluster will + default to equal weight for all groups: the cluster will + attempt to maintain an equal number of workers in each group + within the configured size bounds for each group. If weight + is set for one group only, the cluster will default to zero + weight on the unset group. For example if weight is set only + on primary workers, the cluster will use primary workers + only and no secondary workers. + """ + + min_instances = proto.Field( + proto.INT32, + number=1, + ) + max_instances = proto.Field( + proto.INT32, + number=2, + ) + weight = proto.Field( + proto.INT32, + number=3, + ) + + +class CreateAutoscalingPolicyRequest(proto.Message): + r"""A request to create an autoscaling policy. + Attributes: + parent (str): + Required. The "resource name" of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.create``, the + resource name has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.create``, + the resource name has the following format: + ``projects/{project_id}/locations/{location}`` + policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): + Required. The autoscaling policy to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + policy = proto.Field( + proto.MESSAGE, + number=2, + message='AutoscalingPolicy', + ) + + +class GetAutoscalingPolicyRequest(proto.Message): + r"""A request to fetch an autoscaling policy. + Attributes: + name (str): + Required. The "resource name" of the autoscaling policy, as + described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdateAutoscalingPolicyRequest(proto.Message): + r"""A request to update an autoscaling policy. + Attributes: + policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): + Required. The updated autoscaling policy. + """ + + policy = proto.Field( + proto.MESSAGE, + number=1, + message='AutoscalingPolicy', + ) + + +class DeleteAutoscalingPolicyRequest(proto.Message): + r"""A request to delete an autoscaling policy. + Autoscaling policies in use by one or more clusters will not be + deleted. + + Attributes: + name (str): + Required. The "resource name" of the autoscaling policy, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.delete``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListAutoscalingPoliciesRequest(proto.Message): + r"""A request to list autoscaling policies in a project. + Attributes: + parent (str): + Required. The "resource name" of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + page_size (int): + Optional. The maximum number of results to + return in each response. Must be less than or + equal to 1000. Defaults to 100. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + + +class ListAutoscalingPoliciesResponse(proto.Message): + r"""A response to a request to list autoscaling policies in a + project. + + Attributes: + policies (Sequence[google.cloud.dataproc_v1beta2.types.AutoscalingPolicy]): + Output only. Autoscaling policies list. + next_page_token (str): + Output only. This token is included in the + response if there are more results to fetch. 
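A minimal listing sketch, assuming the generated AutoscalingPolicyServiceClient and a placeholder parent resource name; the client-side pager follows next_page_token automatically, so callers normally never handle the token themselves:

from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.AutoscalingPolicyServiceClient()
parent = "projects/my-project/regions/us-central1"  # placeholder resource name
# Iterating the returned pager transparently issues follow-up requests
# whenever a response carries a next_page_token.
for policy in client.list_autoscaling_policies(parent=parent):
    print(policy.name)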
+ """ + + @property + def raw_page(self): + return self + + policies = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='AutoscalingPolicy', + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/clusters.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/clusters.py new file mode 100644 index 00000000..d428148e --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/clusters.py @@ -0,0 +1,1545 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.dataproc_v1beta2.types import shared +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1beta2', + manifest={ + 'Cluster', + 'ClusterConfig', + 'GkeClusterConfig', + 'EndpointConfig', + 'AutoscalingConfig', + 'EncryptionConfig', + 'GceClusterConfig', + 'InstanceGroupConfig', + 'ManagedGroupConfig', + 'AcceleratorConfig', + 'DiskConfig', + 'LifecycleConfig', + 'SecurityConfig', + 'KerberosConfig', + 'NodeInitializationAction', + 'ClusterStatus', + 'SoftwareConfig', + 'ClusterMetrics', + 'CreateClusterRequest', + 'UpdateClusterRequest', + 'DeleteClusterRequest', + 'GetClusterRequest', + 'ListClustersRequest', + 'ListClustersResponse', + 'DiagnoseClusterRequest', + 'DiagnoseClusterResults', + 'ReservationAffinity', + }, +) + + +class Cluster(proto.Message): + r"""Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + Attributes: + project_id (str): + Required. The Google Cloud Platform project + ID that the cluster belongs to. + cluster_name (str): + Required. The cluster name. Cluster names + within a project must be unique. Names of + deleted clusters can be reused. + config (google.cloud.dataproc_v1beta2.types.ClusterConfig): + Required. The cluster config. Note that + Dataproc may set default values, and values may + change when clusters are updated. + labels (Sequence[google.cloud.dataproc_v1beta2.types.Cluster.LabelsEntry]): + Optional. The labels to associate with this cluster. Label + **keys** must contain 1 to 63 characters, and must conform + to `RFC 1035 `__. + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. No more than + 32 labels can be associated with a cluster. + status (google.cloud.dataproc_v1beta2.types.ClusterStatus): + Output only. Cluster status. + status_history (Sequence[google.cloud.dataproc_v1beta2.types.ClusterStatus]): + Output only. The previous cluster status. + cluster_uuid (str): + Output only. A cluster UUID (Unique Universal + Identifier). Dataproc generates this value when + it creates the cluster. 
+ metrics (google.cloud.dataproc_v1beta2.types.ClusterMetrics): + Output only. Contains cluster daemon metrics such as HDFS + and YARN stats. + + **Beta Feature**: This report is available for testing + purposes only. It may be changed before final release. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + config = proto.Field( + proto.MESSAGE, + number=3, + message='ClusterConfig', + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + status = proto.Field( + proto.MESSAGE, + number=4, + message='ClusterStatus', + ) + status_history = proto.RepeatedField( + proto.MESSAGE, + number=7, + message='ClusterStatus', + ) + cluster_uuid = proto.Field( + proto.STRING, + number=6, + ) + metrics = proto.Field( + proto.MESSAGE, + number=9, + message='ClusterMetrics', + ) + + +class ClusterConfig(proto.Message): + r"""The cluster config. + Attributes: + config_bucket (str): + Optional. A Cloud Storage bucket used to stage job + dependencies, config files, and job driver console output. + If you do not specify a staging bucket, Cloud Dataproc will + determine a Cloud Storage location (US, ASIA, or EU) for + your cluster's staging bucket according to the Compute + Engine zone where your cluster is deployed, and then create + and manage this project-level, per-location bucket (see + `Dataproc staging + bucket `__). + temp_bucket (str): + Optional. A Cloud Storage bucket used to + store ephemeral cluster and jobs data, such as + Spark and MapReduce history files. If you do not + specify a temp bucket, Dataproc will determine a + Cloud Storage location (US, ASIA, or EU) for + your cluster's temp bucket according to the + Compute Engine zone where your cluster is + deployed, and then create and manage this + project-level, per-location bucket. The default + bucket has a TTL of 90 days, but you can use any + TTL (or none) if you specify a bucket. + gce_cluster_config (google.cloud.dataproc_v1beta2.types.GceClusterConfig): + Optional. The shared Compute Engine config + settings for all instances in a cluster. + master_config (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig): + Optional. The Compute Engine config settings + for the master instance in a cluster. + worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig): + Optional. The Compute Engine config settings + for worker instances in a cluster. + secondary_worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig): + Optional. The Compute Engine config settings + for additional worker instances in a cluster. + software_config (google.cloud.dataproc_v1beta2.types.SoftwareConfig): + Optional. The config settings for software + inside the cluster. + lifecycle_config (google.cloud.dataproc_v1beta2.types.LifecycleConfig): + Optional. The config setting for auto delete + cluster schedule. + initialization_actions (Sequence[google.cloud.dataproc_v1beta2.types.NodeInitializationAction]): + Optional. Commands to execute on each node after config is + completed. By default, executables are run on master and all + worker nodes. You can test a node's role metadata to run an + executable on a master or worker node, as shown below using + ``curl`` (you can also use ``wget``): + + :: + + ROLE=$(curl -H Metadata-Flavor:Google + http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + if [[ "${ROLE}" == 'Master' ]]; then + ... master specific actions ... + else + ... worker specific actions ... 
+ fi + encryption_config (google.cloud.dataproc_v1beta2.types.EncryptionConfig): + Optional. Encryption settings for the + cluster. + autoscaling_config (google.cloud.dataproc_v1beta2.types.AutoscalingConfig): + Optional. Autoscaling config for the policy + associated with the cluster. Cluster does not + autoscale if this field is unset. + endpoint_config (google.cloud.dataproc_v1beta2.types.EndpointConfig): + Optional. Port/endpoint configuration for + this cluster + security_config (google.cloud.dataproc_v1beta2.types.SecurityConfig): + Optional. Security related configuration. + gke_cluster_config (google.cloud.dataproc_v1beta2.types.GkeClusterConfig): + Optional. The Kubernetes Engine config for Dataproc clusters + deployed to Kubernetes. Setting this is considered mutually + exclusive with Compute Engine-based options such as + ``gce_cluster_config``, ``master_config``, + ``worker_config``, ``secondary_worker_config``, and + ``autoscaling_config``. + """ + + config_bucket = proto.Field( + proto.STRING, + number=1, + ) + temp_bucket = proto.Field( + proto.STRING, + number=2, + ) + gce_cluster_config = proto.Field( + proto.MESSAGE, + number=8, + message='GceClusterConfig', + ) + master_config = proto.Field( + proto.MESSAGE, + number=9, + message='InstanceGroupConfig', + ) + worker_config = proto.Field( + proto.MESSAGE, + number=10, + message='InstanceGroupConfig', + ) + secondary_worker_config = proto.Field( + proto.MESSAGE, + number=12, + message='InstanceGroupConfig', + ) + software_config = proto.Field( + proto.MESSAGE, + number=13, + message='SoftwareConfig', + ) + lifecycle_config = proto.Field( + proto.MESSAGE, + number=14, + message='LifecycleConfig', + ) + initialization_actions = proto.RepeatedField( + proto.MESSAGE, + number=11, + message='NodeInitializationAction', + ) + encryption_config = proto.Field( + proto.MESSAGE, + number=15, + message='EncryptionConfig', + ) + autoscaling_config = proto.Field( + proto.MESSAGE, + number=16, + message='AutoscalingConfig', + ) + endpoint_config = proto.Field( + proto.MESSAGE, + number=17, + message='EndpointConfig', + ) + security_config = proto.Field( + proto.MESSAGE, + number=18, + message='SecurityConfig', + ) + gke_cluster_config = proto.Field( + proto.MESSAGE, + number=19, + message='GkeClusterConfig', + ) + + +class GkeClusterConfig(proto.Message): + r"""The GKE config for this cluster. + Attributes: + namespaced_gke_deployment_target (google.cloud.dataproc_v1beta2.types.GkeClusterConfig.NamespacedGkeDeploymentTarget): + Optional. A target for the deployment. + """ + + class NamespacedGkeDeploymentTarget(proto.Message): + r"""A full, namespace-isolated deployment target for an existing + GKE cluster. + + Attributes: + target_gke_cluster (str): + Optional. The target GKE cluster to deploy to. Format: + 'projects/{project}/locations/{location}/clusters/{cluster_id}' + cluster_namespace (str): + Optional. A namespace within the GKE cluster + to deploy into. + """ + + target_gke_cluster = proto.Field( + proto.STRING, + number=1, + ) + cluster_namespace = proto.Field( + proto.STRING, + number=2, + ) + + namespaced_gke_deployment_target = proto.Field( + proto.MESSAGE, + number=1, + message=NamespacedGkeDeploymentTarget, + ) + + +class EndpointConfig(proto.Message): + r"""Endpoint config for this cluster + Attributes: + http_ports (Sequence[google.cloud.dataproc_v1beta2.types.EndpointConfig.HttpPortsEntry]): + Output only. The map of port descriptions to URLs. Will only + be populated if enable_http_port_access is true. 
+ enable_http_port_access (bool): + Optional. If true, enable http access to + specific ports on the cluster from external + sources. Defaults to false. + """ + + http_ports = proto.MapField( + proto.STRING, + proto.STRING, + number=1, + ) + enable_http_port_access = proto.Field( + proto.BOOL, + number=2, + ) + + +class AutoscalingConfig(proto.Message): + r"""Autoscaling Policy config associated with the cluster. + Attributes: + policy_uri (str): + Optional. The autoscaling policy used by the cluster. + + Only resource names including projectid and location + (region) are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` + - ``projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` + + Note that the policy must be in the same project and + Dataproc region. + """ + + policy_uri = proto.Field( + proto.STRING, + number=1, + ) + + +class EncryptionConfig(proto.Message): + r"""Encryption settings for the cluster. + Attributes: + gce_pd_kms_key_name (str): + Optional. The Cloud KMS key name to use for + PD disk encryption for all instances in the + cluster. + """ + + gce_pd_kms_key_name = proto.Field( + proto.STRING, + number=1, + ) + + +class GceClusterConfig(proto.Message): + r"""Common config settings for resources of Compute Engine + cluster instances, applicable to all instances in the cluster. + + Attributes: + zone_uri (str): + Optional. The zone where the Compute Engine cluster will be + located. On a create request, it is required in the "global" + region. If omitted in a non-global Dataproc region, the + service will pick a zone in the corresponding Compute Engine + region. On a get request, zone will always be present. + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`` + - ``projects/[project_id]/zones/[zone]`` + - ``us-central1-f`` + network_uri (str): + Optional. The Compute Engine network to be used for machine + communications. Cannot be specified with subnetwork_uri. If + neither ``network_uri`` nor ``subnetwork_uri`` is specified, + the "default" network of the project is used, if it exists. + Cannot be a "Custom Subnet Network" (see `Using + Subnetworks `__ + for more information). + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`` + - ``projects/[project_id]/regions/global/default`` + - ``default`` + subnetwork_uri (str): + Optional. The Compute Engine subnetwork to be used for + machine communications. Cannot be specified with + network_uri. + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0`` + - ``projects/[project_id]/regions/us-east1/subnetworks/sub0`` + - ``sub0`` + internal_ip_only (bool): + Optional. If true, all instances in the cluster will only + have internal IP addresses. By default, clusters are not + restricted to internal IP addresses, and will have ephemeral + external IP addresses assigned to each instance. This + ``internal_ip_only`` restriction can only be enabled for + subnetwork enabled networks, and all off-cluster + dependencies must be configured to be accessible without + external IP addresses. + service_account (str): + Optional. 
The `Dataproc service + account `__ + (also see `VM Data Plane + identity `__) + used by Dataproc cluster VM instances to access Google Cloud + Platform services. + + If not specified, the `Compute Engine default service + account `__ + is used. + service_account_scopes (Sequence[str]): + Optional. The URIs of service account scopes to be included + in Compute Engine instances. The following base set of + scopes is always included: + + - https://www.googleapis.com/auth/cloud.useraccounts.readonly + - https://www.googleapis.com/auth/devstorage.read_write + - https://www.googleapis.com/auth/logging.write + + If no scopes are specified, the following defaults are also + provided: + + - https://www.googleapis.com/auth/bigquery + - https://www.googleapis.com/auth/bigtable.admin.table + - https://www.googleapis.com/auth/bigtable.data + - https://www.googleapis.com/auth/devstorage.full_control + tags (Sequence[str]): + The Compute Engine tags to add to all instances (see + `Tagging + instances `__). + metadata (Sequence[google.cloud.dataproc_v1beta2.types.GceClusterConfig.MetadataEntry]): + The Compute Engine metadata entries to add to all instances + (see `Project and instance + metadata `__). + reservation_affinity (google.cloud.dataproc_v1beta2.types.ReservationAffinity): + Optional. Reservation Affinity for consuming + Zonal reservation. + """ + + zone_uri = proto.Field( + proto.STRING, + number=1, + ) + network_uri = proto.Field( + proto.STRING, + number=2, + ) + subnetwork_uri = proto.Field( + proto.STRING, + number=6, + ) + internal_ip_only = proto.Field( + proto.BOOL, + number=7, + ) + service_account = proto.Field( + proto.STRING, + number=8, + ) + service_account_scopes = proto.RepeatedField( + proto.STRING, + number=3, + ) + tags = proto.RepeatedField( + proto.STRING, + number=4, + ) + metadata = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + reservation_affinity = proto.Field( + proto.MESSAGE, + number=11, + message='ReservationAffinity', + ) + + +class InstanceGroupConfig(proto.Message): + r"""The config settings for Compute Engine resources in + an instance group, such as a master or worker group. + + Attributes: + num_instances (int): + Optional. The number of VM instances in the + instance group. For master instance groups, must + be set to 1. + instance_names (Sequence[str]): + Output only. The list of instance names. Dataproc derives + the names from ``cluster_name``, ``num_instances``, and the + instance group. + image_uri (str): + Optional. The Compute Engine image resource used for cluster + instances. + + The URI can represent an image or image family. + + Image examples: + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`` + - ``projects/[project_id]/global/images/[image-id]`` + - ``image-id`` + + Image family examples. Dataproc will use the most recent + image from the family: + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`` + - ``projects/[project_id]/global/images/family/[custom-image-family-name]`` + + If the URI is unspecified, it will be inferred from + ``SoftwareConfig.image_version`` or the system default. + machine_type_uri (str): + Optional. The Compute Engine machine type used for cluster + instances. + + A full URL, partial URI, or short name are valid. 
Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` + - ``projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` + - ``n1-standard-2`` + + **Auto Zone Exception**: If you are using the Dataproc `Auto + Zone + Placement `__ + feature, you must use the short name of the machine type + resource, for example, ``n1-standard-2``. + disk_config (google.cloud.dataproc_v1beta2.types.DiskConfig): + Optional. Disk option config settings. + is_preemptible (bool): + Output only. Specifies that this instance + group contains preemptible instances. + preemptibility (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig.Preemptibility): + Optional. Specifies the preemptibility of the instance + group. + + The default value for master and worker groups is + ``NON_PREEMPTIBLE``. This default cannot be changed. + + The default value for secondary instances is + ``PREEMPTIBLE``. + managed_group_config (google.cloud.dataproc_v1beta2.types.ManagedGroupConfig): + Output only. The config for Compute Engine + Instance Group Manager that manages this group. + This is only used for preemptible instance + groups. + accelerators (Sequence[google.cloud.dataproc_v1beta2.types.AcceleratorConfig]): + Optional. The Compute Engine accelerator + configuration for these instances. + min_cpu_platform (str): + Specifies the minimum cpu platform for the Instance Group. + See `Dataproc -> Minimum CPU + Platform `__. + """ + class Preemptibility(proto.Enum): + r"""Controls the use of [preemptible instances] + (https://cloud.google.com/compute/docs/instances/preemptible) within + the group. + """ + PREEMPTIBILITY_UNSPECIFIED = 0 + NON_PREEMPTIBLE = 1 + PREEMPTIBLE = 2 + + num_instances = proto.Field( + proto.INT32, + number=1, + ) + instance_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + image_uri = proto.Field( + proto.STRING, + number=3, + ) + machine_type_uri = proto.Field( + proto.STRING, + number=4, + ) + disk_config = proto.Field( + proto.MESSAGE, + number=5, + message='DiskConfig', + ) + is_preemptible = proto.Field( + proto.BOOL, + number=6, + ) + preemptibility = proto.Field( + proto.ENUM, + number=10, + enum=Preemptibility, + ) + managed_group_config = proto.Field( + proto.MESSAGE, + number=7, + message='ManagedGroupConfig', + ) + accelerators = proto.RepeatedField( + proto.MESSAGE, + number=8, + message='AcceleratorConfig', + ) + min_cpu_platform = proto.Field( + proto.STRING, + number=9, + ) + + +class ManagedGroupConfig(proto.Message): + r"""Specifies the resources used to actively manage an instance + group. + + Attributes: + instance_template_name (str): + Output only. The name of the Instance + Template used for the Managed Instance Group. + instance_group_manager_name (str): + Output only. The name of the Instance Group + Manager for this group. + """ + + instance_template_name = proto.Field( + proto.STRING, + number=1, + ) + instance_group_manager_name = proto.Field( + proto.STRING, + number=2, + ) + + +class AcceleratorConfig(proto.Message): + r"""Specifies the type and number of accelerator cards attached to the + instances of an instance group (see `GPUs on Compute + Engine `__). + + Attributes: + accelerator_type_uri (str): + Full URL, partial URI, or short name of the accelerator type + resource to expose to this instance. 
See `Compute Engine + AcceleratorTypes `__ + + Examples + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` + - ``projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` + - ``nvidia-tesla-k80`` + + **Auto Zone Exception**: If you are using the Dataproc `Auto + Zone + Placement `__ + feature, you must use the short name of the accelerator type + resource, for example, ``nvidia-tesla-k80``. + accelerator_count (int): + The number of the accelerator cards of this + type exposed to this instance. + """ + + accelerator_type_uri = proto.Field( + proto.STRING, + number=1, + ) + accelerator_count = proto.Field( + proto.INT32, + number=2, + ) + + +class DiskConfig(proto.Message): + r"""Specifies the config of disk options for a group of VM + instances. + + Attributes: + boot_disk_type (str): + Optional. Type of the boot disk (default is + "pd-standard"). Valid values: "pd-ssd" + (Persistent Disk Solid State Drive) or "pd- + standard" (Persistent Disk Hard Disk Drive). + boot_disk_size_gb (int): + Optional. Size in GB of the boot disk + (default is 500GB). + num_local_ssds (int): + Number of attached SSDs, from 0 to 4 (default is 0). If SSDs + are not attached, the boot disk is used to store runtime + logs and + `HDFS `__ + data. If one or more SSDs are attached, this runtime bulk + data is spread across them, and the boot disk contains only + basic config and installed binaries. + """ + + boot_disk_type = proto.Field( + proto.STRING, + number=3, + ) + boot_disk_size_gb = proto.Field( + proto.INT32, + number=1, + ) + num_local_ssds = proto.Field( + proto.INT32, + number=2, + ) + + +class LifecycleConfig(proto.Message): + r"""Specifies the cluster auto-delete schedule configuration. + Attributes: + idle_delete_ttl (google.protobuf.duration_pb2.Duration): + Optional. The duration to keep the cluster alive while + idling (when no jobs are running). Passing this threshold + will cause the cluster to be deleted. Minimum value is 10 + minutes; maximum value is 14 days (see JSON representation + of + `Duration `__. + auto_delete_time (google.protobuf.timestamp_pb2.Timestamp): + Optional. The time when cluster will be auto-deleted. (see + JSON representation of + `Timestamp `__). + auto_delete_ttl (google.protobuf.duration_pb2.Duration): + Optional. The lifetime duration of cluster. The cluster will + be auto-deleted at the end of this period. Minimum value is + 10 minutes; maximum value is 14 days (see JSON + representation of + `Duration `__). + idle_start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when cluster became idle (most recent + job finished) and became eligible for deletion due to + idleness (see JSON representation of + `Timestamp `__). + """ + + idle_delete_ttl = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + auto_delete_time = proto.Field( + proto.MESSAGE, + number=2, + oneof='ttl', + message=timestamp_pb2.Timestamp, + ) + auto_delete_ttl = proto.Field( + proto.MESSAGE, + number=3, + oneof='ttl', + message=duration_pb2.Duration, + ) + idle_start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class SecurityConfig(proto.Message): + r"""Security related configuration, including encryption, + Kerberos, etc. + + Attributes: + kerberos_config (google.cloud.dataproc_v1beta2.types.KerberosConfig): + Kerberos related configuration. 
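+ 
+ For illustration only (not part of the generated surface), a sketch of
+ how this message is typically attached to a cluster; ``kerberos_config``
+ stands for a ``KerberosConfig`` as described in the next message, and
+ ``security_config`` is assumed to be the corresponding field on
+ ``ClusterConfig`` earlier in this module::
+ 
+     cluster_config = ClusterConfig(
+         security_config=SecurityConfig(kerberos_config=kerberos_config),
+     )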
+ """
+ 
+ kerberos_config = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message='KerberosConfig',
+ )
+ 
+ 
+ class KerberosConfig(proto.Message):
+ r"""Specifies Kerberos related configuration.
+ Attributes:
+ enable_kerberos (bool):
+ Optional. Flag to indicate whether to
+ Kerberize the cluster (default: false). Set this
+ field to true to enable Kerberos on a cluster.
+ root_principal_password_uri (str):
+ Required. The Cloud Storage URI of a KMS
+ encrypted file containing the root principal
+ password.
+ kms_key_uri (str):
+ Required. The URI of the KMS key used to
+ encrypt various sensitive files.
+ keystore_uri (str):
+ Optional. The Cloud Storage URI of the
+ keystore file used for SSL encryption. If not
+ provided, Dataproc will provide a self-signed
+ certificate.
+ truststore_uri (str):
+ Optional. The Cloud Storage URI of the
+ truststore file used for SSL encryption. If not
+ provided, Dataproc will provide a self-signed
+ certificate.
+ keystore_password_uri (str):
+ Optional. The Cloud Storage URI of a KMS
+ encrypted file containing the password to the
+ user-provided keystore. For the self-signed
+ certificate, this password is generated by
+ Dataproc.
+ key_password_uri (str):
+ Optional. The Cloud Storage URI of a KMS
+ encrypted file containing the password to the
+ user-provided key. For the self-signed
+ certificate, this password is generated by
+ Dataproc.
+ truststore_password_uri (str):
+ Optional. The Cloud Storage URI of a KMS
+ encrypted file containing the password to the
+ user-provided truststore. For the self-signed
+ certificate, this password is generated by
+ Dataproc.
+ cross_realm_trust_realm (str):
+ Optional. The remote realm the Dataproc
+ on-cluster KDC will trust, should the user enable
+ cross realm trust.
+ cross_realm_trust_kdc (str):
+ Optional. The KDC (IP or hostname) for the
+ remote trusted realm in a cross realm trust
+ relationship.
+ cross_realm_trust_admin_server (str):
+ Optional. The admin server (IP or hostname)
+ for the remote trusted realm in a cross realm
+ trust relationship.
+ cross_realm_trust_shared_password_uri (str):
+ Optional. The Cloud Storage URI of a KMS
+ encrypted file containing the shared password
+ between the on-cluster Kerberos realm and the
+ remote trusted realm, in a cross realm trust
+ relationship.
+ kdc_db_key_uri (str):
+ Optional. The Cloud Storage URI of a KMS
+ encrypted file containing the master key of the
+ KDC database.
+ tgt_lifetime_hours (int):
+ Optional. The lifetime of the ticket granting
+ ticket, in hours. If not specified, or if 0 is
+ specified, the default value of 10 will be used.
+ realm (str):
+ Optional. The name of the on-cluster Kerberos
+ realm. If not specified, the uppercased domain
+ of hostnames will be the realm.
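+ 
+ A minimal illustrative sketch of enabling Kerberos; the bucket,
+ project, and key names below are placeholders, not defaults::
+ 
+     kerberos_config = KerberosConfig(
+         enable_kerberos=True,
+         root_principal_password_uri='gs://my-bucket/kerberos-root-password.encrypted',
+         kms_key_uri='projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key',
+     )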
+ """ + + enable_kerberos = proto.Field( + proto.BOOL, + number=1, + ) + root_principal_password_uri = proto.Field( + proto.STRING, + number=2, + ) + kms_key_uri = proto.Field( + proto.STRING, + number=3, + ) + keystore_uri = proto.Field( + proto.STRING, + number=4, + ) + truststore_uri = proto.Field( + proto.STRING, + number=5, + ) + keystore_password_uri = proto.Field( + proto.STRING, + number=6, + ) + key_password_uri = proto.Field( + proto.STRING, + number=7, + ) + truststore_password_uri = proto.Field( + proto.STRING, + number=8, + ) + cross_realm_trust_realm = proto.Field( + proto.STRING, + number=9, + ) + cross_realm_trust_kdc = proto.Field( + proto.STRING, + number=10, + ) + cross_realm_trust_admin_server = proto.Field( + proto.STRING, + number=11, + ) + cross_realm_trust_shared_password_uri = proto.Field( + proto.STRING, + number=12, + ) + kdc_db_key_uri = proto.Field( + proto.STRING, + number=13, + ) + tgt_lifetime_hours = proto.Field( + proto.INT32, + number=14, + ) + realm = proto.Field( + proto.STRING, + number=15, + ) + + +class NodeInitializationAction(proto.Message): + r"""Specifies an executable to run on a fully configured node and + a timeout period for executable completion. + + Attributes: + executable_file (str): + Required. Cloud Storage URI of executable + file. + execution_timeout (google.protobuf.duration_pb2.Duration): + Optional. Amount of time executable has to complete. Default + is 10 minutes (see JSON representation of + `Duration `__). + + Cluster creation fails with an explanatory error message + (the name of the executable that caused the error and the + exceeded timeout period) if the executable is not completed + at end of the timeout period. + """ + + executable_file = proto.Field( + proto.STRING, + number=1, + ) + execution_timeout = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + + +class ClusterStatus(proto.Message): + r"""The status of a cluster and its instances. + Attributes: + state (google.cloud.dataproc_v1beta2.types.ClusterStatus.State): + Output only. The cluster's state. + detail (str): + Output only. Optional details of cluster's + state. + state_start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when this state was entered (see JSON + representation of + `Timestamp `__). + substate (google.cloud.dataproc_v1beta2.types.ClusterStatus.Substate): + Output only. Additional state information + that includes status reported by the agent. + """ + class State(proto.Enum): + r"""The cluster state.""" + UNKNOWN = 0 + CREATING = 1 + RUNNING = 2 + ERROR = 3 + DELETING = 4 + UPDATING = 5 + STOPPING = 6 + STOPPED = 7 + STARTING = 8 + + class Substate(proto.Enum): + r"""The cluster substate.""" + UNSPECIFIED = 0 + UNHEALTHY = 1 + STALE_STATUS = 2 + + state = proto.Field( + proto.ENUM, + number=1, + enum=State, + ) + detail = proto.Field( + proto.STRING, + number=2, + ) + state_start_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + substate = proto.Field( + proto.ENUM, + number=4, + enum=Substate, + ) + + +class SoftwareConfig(proto.Message): + r"""Specifies the selection and config of software inside the + cluster. + + Attributes: + image_version (str): + Optional. The version of software inside the cluster. It + must be one of the supported `Dataproc + Versions `__, + such as "1.2" (including a subminor version, such as + "1.2.29"), or the `"preview" + version `__. + If unspecified, it defaults to the latest Debian version. 
+ properties (Sequence[google.cloud.dataproc_v1beta2.types.SoftwareConfig.PropertiesEntry]): + Optional. The properties to set on daemon config files. + + Property keys are specified in ``prefix:property`` format, + for example ``core:hadoop.tmp.dir``. The following are + supported prefixes and their mappings: + + - capacity-scheduler: ``capacity-scheduler.xml`` + - core: ``core-site.xml`` + - distcp: ``distcp-default.xml`` + - hdfs: ``hdfs-site.xml`` + - hive: ``hive-site.xml`` + - mapred: ``mapred-site.xml`` + - pig: ``pig.properties`` + - spark: ``spark-defaults.conf`` + - yarn: ``yarn-site.xml`` + + For more information, see `Cluster + properties `__. + optional_components (Sequence[google.cloud.dataproc_v1beta2.types.Component]): + The set of optional components to activate on + the cluster. + """ + + image_version = proto.Field( + proto.STRING, + number=1, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + optional_components = proto.RepeatedField( + proto.ENUM, + number=3, + enum=shared.Component, + ) + + +class ClusterMetrics(proto.Message): + r"""Contains cluster daemon metrics, such as HDFS and YARN stats. + + **Beta Feature**: This report is available for testing purposes + only. It may be changed before final release. + + Attributes: + hdfs_metrics (Sequence[google.cloud.dataproc_v1beta2.types.ClusterMetrics.HdfsMetricsEntry]): + The HDFS metrics. + yarn_metrics (Sequence[google.cloud.dataproc_v1beta2.types.ClusterMetrics.YarnMetricsEntry]): + The YARN metrics. + """ + + hdfs_metrics = proto.MapField( + proto.STRING, + proto.INT64, + number=1, + ) + yarn_metrics = proto.MapField( + proto.STRING, + proto.INT64, + number=2, + ) + + +class CreateClusterRequest(proto.Message): + r"""A request to create a cluster. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster (google.cloud.dataproc_v1beta2.types.Cluster): + Required. The cluster to create. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] + requests with the same id, then the second request will be + ignored and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + cluster = proto.Field( + proto.MESSAGE, + number=2, + message='Cluster', + ) + request_id = proto.Field( + proto.STRING, + number=4, + ) + + +class UpdateClusterRequest(proto.Message): + r"""A request to update a cluster. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + cluster (google.cloud.dataproc_v1beta2.types.Cluster): + Required. The changes to the cluster. + graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): + Optional. Timeout for graceful YARN decomissioning. 
Graceful
+ decommissioning allows removing nodes from the cluster
+ without interrupting jobs in progress. Timeout specifies how
+ long to wait for jobs in progress to finish before
+ forcefully removing nodes (and potentially interrupting
+ jobs). Default timeout is 0 (for forceful decommission), and
+ the maximum allowed timeout is 1 day (see JSON
+ representation of
+ `Duration `__).
+ 
+ Only supported on Dataproc image versions 1.2 and higher.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. Specifies the path, relative to ``Cluster``, of
+ the field to update. For example, to change the number of
+ workers in a cluster to 5, the ``update_mask`` parameter
+ would be specified as
+ ``config.worker_config.num_instances``, and the ``PATCH``
+ request body would specify the new value, as follows:
+ 
+ ::
+ 
+ {
+ "config":{
+ "workerConfig":{
+ "numInstances":"5"
+ }
+ }
+ }
+ 
+ Similarly, to change the number of preemptible workers in a
+ cluster to 5, the ``update_mask`` parameter would be
+ ``config.secondary_worker_config.num_instances``, and the
+ ``PATCH`` request body would be set as follows:
+ 
+ ::
+ 
+ {
+ "config":{
+ "secondaryWorkerConfig":{
+ "numInstances":"5"
+ }
+ }
+ }
+ 
+ Note: currently only the following fields can be updated:
+ 
+ - ``labels``: Updates labels
+ - ``config.worker_config.num_instances``: Resize primary worker group
+ - ``config.secondary_worker_config.num_instances``: Resize secondary worker group
+ - ``config.lifecycle_config.auto_delete_ttl``: Reset MAX TTL duration
+ - ``config.lifecycle_config.auto_delete_time``: Update MAX TTL deletion timestamp
+ - ``config.lifecycle_config.idle_delete_ttl``: Update Idle TTL duration
+ - ``config.autoscaling_config.policy_uri``: Use, stop using, or change autoscaling policies
+ 
+ request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] + requests with the same id, then the second request will be + ignored and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=5, + ) + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + cluster = proto.Field( + proto.MESSAGE, + number=3, + message='Cluster', + ) + graceful_decommission_timeout = proto.Field( + proto.MESSAGE, + number=6, + message=duration_pb2.Duration, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=4, + message=field_mask_pb2.FieldMask, + ) + request_id = proto.Field( + proto.STRING, + number=7, + ) + + +class DeleteClusterRequest(proto.Message): + r"""A request to delete a cluster. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + cluster_uuid (str): + Optional. Specifying the ``cluster_uuid`` means the RPC + should fail (with error NOT_FOUND) if cluster with specified + UUID does not exist. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] + requests with the same id, then the second request will be + ignored and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + cluster_uuid = proto.Field( + proto.STRING, + number=4, + ) + request_id = proto.Field( + proto.STRING, + number=5, + ) + + +class GetClusterRequest(proto.Message): + r"""Request to get the resource representation for a cluster in a + project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + + +class ListClustersRequest(proto.Message): + r"""A request to list the clusters in a project. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + filter (str): + Optional. A filter constraining the clusters to list. 
+ Filters are case-sensitive and have the following syntax: + + field = value [AND [field = value]] ... + + where **field** is one of ``status.state``, ``clusterName``, + or ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** + can be ``*`` to match all values. ``status.state`` can be + one of the following: ``ACTIVE``, ``INACTIVE``, + ``CREATING``, ``RUNNING``, ``ERROR``, ``DELETING``, or + ``UPDATING``. ``ACTIVE`` contains the ``CREATING``, + ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains + the ``DELETING`` and ``ERROR`` states. ``clusterName`` is + the name of the cluster provided at creation time. Only the + logical ``AND`` operator is supported; space-separated items + are treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + page_size (int): + Optional. The standard List page size. + page_token (str): + Optional. The standard List page token. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=4, + ) + filter = proto.Field( + proto.STRING, + number=5, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + + +class ListClustersResponse(proto.Message): + r"""The list of all clusters in a project. + Attributes: + clusters (Sequence[google.cloud.dataproc_v1beta2.types.Cluster]): + Output only. The clusters in the project. + next_page_token (str): + Output only. This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the ``page_token`` in a subsequent + ListClustersRequest. + """ + + @property + def raw_page(self): + return self + + clusters = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Cluster', + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DiagnoseClusterRequest(proto.Message): + r"""A request to collect cluster diagnostic information. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + + +class DiagnoseClusterResults(proto.Message): + r"""The location of diagnostic output. + Attributes: + output_uri (str): + Output only. The Cloud Storage URI of the + diagnostic output. The output report is a plain + text file with a summary of collected + diagnostics. + """ + + output_uri = proto.Field( + proto.STRING, + number=1, + ) + + +class ReservationAffinity(proto.Message): + r"""Reservation Affinity for consuming Zonal reservation. + Attributes: + consume_reservation_type (google.cloud.dataproc_v1beta2.types.ReservationAffinity.Type): + Optional. Type of reservation to consume + key (str): + Optional. Corresponds to the label key of + reservation resource. + values (Sequence[str]): + Optional. Corresponds to the label values of + reservation resource. + """ + class Type(proto.Enum): + r"""Indicates whether to consume capacity from an reservation or + not. 
+ """ + TYPE_UNSPECIFIED = 0 + NO_RESERVATION = 1 + ANY_RESERVATION = 2 + SPECIFIC_RESERVATION = 3 + + consume_reservation_type = proto.Field( + proto.ENUM, + number=1, + enum=Type, + ) + key = proto.Field( + proto.STRING, + number=2, + ) + values = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/jobs.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/jobs.py new file mode 100644 index 00000000..6c736db5 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/jobs.py @@ -0,0 +1,1364 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1beta2', + manifest={ + 'LoggingConfig', + 'HadoopJob', + 'SparkJob', + 'PySparkJob', + 'QueryList', + 'HiveJob', + 'SparkSqlJob', + 'PigJob', + 'SparkRJob', + 'PrestoJob', + 'JobPlacement', + 'JobStatus', + 'JobReference', + 'YarnApplication', + 'Job', + 'JobScheduling', + 'JobMetadata', + 'SubmitJobRequest', + 'GetJobRequest', + 'ListJobsRequest', + 'UpdateJobRequest', + 'ListJobsResponse', + 'CancelJobRequest', + 'DeleteJobRequest', + }, +) + + +class LoggingConfig(proto.Message): + r"""The runtime logging config of the job. + Attributes: + driver_log_levels (Sequence[google.cloud.dataproc_v1beta2.types.LoggingConfig.DriverLogLevelsEntry]): + The per-package log levels for the driver. + This may include "root" package name to + configure rootLogger. Examples: + 'com.google = FATAL', 'root = INFO', + 'org.apache = DEBUG' + """ + class Level(proto.Enum): + r"""The Log4j level for job execution. When running an `Apache + Hive `__ job, Cloud Dataproc configures the + Hive client to an equivalent verbosity level. + """ + LEVEL_UNSPECIFIED = 0 + ALL = 1 + TRACE = 2 + DEBUG = 3 + INFO = 4 + WARN = 5 + ERROR = 6 + FATAL = 7 + OFF = 8 + + driver_log_levels = proto.MapField( + proto.STRING, + proto.ENUM, + number=2, + enum=Level, + ) + + +class HadoopJob(proto.Message): + r"""A Dataproc job for running `Apache Hadoop + MapReduce `__ + jobs on `Apache Hadoop + YARN `__. + + Attributes: + main_jar_file_uri (str): + The HCFS URI of the jar file containing the + main class. Examples: + 'gs://foo-bucket/analytics-binaries/extract- + useful-metrics-mr.jar' 'hdfs:/tmp/test- + samples/custom-wordcount.jar' + 'file:///home/usr/lib/hadoop-mapreduce/hadoop- + mapreduce-examples.jar' + main_class (str): + The name of the driver's main class. The jar file containing + the class must be in the default CLASSPATH or specified in + ``jar_file_uris``. + args (Sequence[str]): + Optional. The arguments to pass to the driver. 
Do not + include arguments, such as ``-libjars`` or ``-Dfoo=bar``, + that can be set as job properties, since a collision may + occur that causes an incorrect job submission. + jar_file_uris (Sequence[str]): + Optional. Jar file URIs to add to the + CLASSPATHs of the Hadoop driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS (Hadoop Compatible Filesystem) + URIs of files to be copied to the working + directory of Hadoop drivers and distributed + tasks. Useful for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted in the working directory of Hadoop + drivers and tasks. Supported file types: .jar, + .tar, .tar.gz, .tgz, or .zip. + properties (Sequence[google.cloud.dataproc_v1beta2.types.HadoopJob.PropertiesEntry]): + Optional. A mapping of property names to values, used to + configure Hadoop. Properties that conflict with values set + by the Dataproc API may be overwritten. Can include + properties set in /etc/hadoop/conf/*-site and classes in + user code. + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_jar_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='driver', + ) + main_class = proto.Field( + proto.STRING, + number=2, + oneof='driver', + ) + args = proto.RepeatedField( + proto.STRING, + number=3, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=4, + ) + file_uris = proto.RepeatedField( + proto.STRING, + number=5, + ) + archive_uris = proto.RepeatedField( + proto.STRING, + number=6, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=8, + message='LoggingConfig', + ) + + +class SparkJob(proto.Message): + r"""A Dataproc job for running `Apache + Spark `__ applications on YARN. The + specification of the main method to call to drive the job. Specify + either the jar file that contains the main class or the main class + name. To pass both a main jar and a main class in that jar, add the + jar to ``CommonJob.jar_file_uris``, and then specify the main class + name in ``main_class``. + + Attributes: + main_jar_file_uri (str): + The HCFS URI of the jar file that contains + the main class. + main_class (str): + The name of the driver's main class. The jar file that + contains the class must be in the default CLASSPATH or + specified in ``jar_file_uris``. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATHs of the Spark driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be placed in + the working directory of each executor. Useful + for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. + properties (Sequence[google.cloud.dataproc_v1beta2.types.SparkJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure Spark. Properties that + conflict with values set by the Dataproc API may + be overwritten. Can include properties set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. 
+ logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_jar_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='driver', + ) + main_class = proto.Field( + proto.STRING, + number=2, + oneof='driver', + ) + args = proto.RepeatedField( + proto.STRING, + number=3, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=4, + ) + file_uris = proto.RepeatedField( + proto.STRING, + number=5, + ) + archive_uris = proto.RepeatedField( + proto.STRING, + number=6, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=8, + message='LoggingConfig', + ) + + +class PySparkJob(proto.Message): + r"""A Dataproc job for running `Apache + PySpark `__ + applications on YARN. + + Attributes: + main_python_file_uri (str): + Required. The HCFS URI of the main Python + file to use as the driver. Must be a .py file. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + python_file_uris (Sequence[str]): + Optional. HCFS file URIs of Python files to + pass to the PySpark framework. Supported file + types: .py, .egg, and .zip. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATHs of the Python driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be placed in + the working directory of each executor. Useful + for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. + properties (Sequence[google.cloud.dataproc_v1beta2.types.PySparkJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure PySpark. Properties + that conflict with values set by the Dataproc + API may be overwritten. Can include properties + set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_python_file_uri = proto.Field( + proto.STRING, + number=1, + ) + args = proto.RepeatedField( + proto.STRING, + number=2, + ) + python_file_uris = proto.RepeatedField( + proto.STRING, + number=3, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=4, + ) + file_uris = proto.RepeatedField( + proto.STRING, + number=5, + ) + archive_uris = proto.RepeatedField( + proto.STRING, + number=6, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=8, + message='LoggingConfig', + ) + + +class QueryList(proto.Message): + r"""A list of queries to run on a cluster. + Attributes: + queries (Sequence[str]): + Required. The queries to execute. You do not need to + terminate a query with a semicolon. Multiple queries can be + specified in one string by separating each with a semicolon. 
+ Here is an example of an Cloud Dataproc API snippet that + uses a QueryList to specify a HiveJob: + + :: + + "hiveJob": { + "queryList": { + "queries": [ + "query1", + "query2", + "query3;query4", + ] + } + } + """ + + queries = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class HiveJob(proto.Message): + r"""A Dataproc job for running `Apache + Hive `__ queries on YARN. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains Hive + queries. + query_list (google.cloud.dataproc_v1beta2.types.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + script_variables (Sequence[google.cloud.dataproc_v1beta2.types.HiveJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Hive command: ``SET name="value";``). + properties (Sequence[google.cloud.dataproc_v1beta2.types.HiveJob.PropertiesEntry]): + Optional. A mapping of property names and values, used to + configure Hive. Properties that conflict with values set by + the Dataproc API may be overwritten. Can include properties + set in /etc/hadoop/conf/*-site.xml, + /etc/hive/conf/hive-site.xml, and classes in user code. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATH of the Hive server and Hadoop + MapReduce (MR) tasks. Can contain Hive SerDes + and UDFs. + """ + + query_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='queries', + ) + query_list = proto.Field( + proto.MESSAGE, + number=2, + oneof='queries', + message='QueryList', + ) + continue_on_failure = proto.Field( + proto.BOOL, + number=3, + ) + script_variables = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=6, + ) + + +class SparkSqlJob(proto.Message): + r"""A Dataproc job for running `Apache Spark + SQL `__ queries. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains SQL + queries. + query_list (google.cloud.dataproc_v1beta2.types.QueryList): + A list of queries. + script_variables (Sequence[google.cloud.dataproc_v1beta2.types.SparkSqlJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Spark SQL command: SET + ``name="value";``). + properties (Sequence[google.cloud.dataproc_v1beta2.types.SparkSqlJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure Spark SQL's SparkConf. + Properties that conflict with values set by the + Dataproc API may be overwritten. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to be added + to the Spark CLASSPATH. + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): + Optional. The runtime log config for job + execution. 
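+ 
+ A minimal illustrative sketch; the query text and variable name are
+ placeholders, and the substitution follows the ``SET name="value";``
+ convention noted above::
+ 
+     spark_sql_job = SparkSqlJob(
+         query_list=QueryList(queries=['SELECT * FROM events WHERE dt = "${run_date}"']),
+         script_variables={'run_date': '2021-06-29'},
+     )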
+ """ + + query_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='queries', + ) + query_list = proto.Field( + proto.MESSAGE, + number=2, + oneof='queries', + message='QueryList', + ) + script_variables = proto.MapField( + proto.STRING, + proto.STRING, + number=3, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=56, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=6, + message='LoggingConfig', + ) + + +class PigJob(proto.Message): + r"""A Dataproc job for running `Apache Pig `__ + queries on YARN. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains the + Pig queries. + query_list (google.cloud.dataproc_v1beta2.types.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + script_variables (Sequence[google.cloud.dataproc_v1beta2.types.PigJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Pig command: ``name=[value]``). + properties (Sequence[google.cloud.dataproc_v1beta2.types.PigJob.PropertiesEntry]): + Optional. A mapping of property names to values, used to + configure Pig. Properties that conflict with values set by + the Dataproc API may be overwritten. Can include properties + set in /etc/hadoop/conf/*-site.xml, + /etc/pig/conf/pig.properties, and classes in user code. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATH of the Pig Client and Hadoop + MapReduce (MR) tasks. Can contain Pig UDFs. + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + query_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='queries', + ) + query_list = proto.Field( + proto.MESSAGE, + number=2, + oneof='queries', + message='QueryList', + ) + continue_on_failure = proto.Field( + proto.BOOL, + number=3, + ) + script_variables = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + jar_file_uris = proto.RepeatedField( + proto.STRING, + number=6, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=7, + message='LoggingConfig', + ) + + +class SparkRJob(proto.Message): + r"""A Dataproc job for running `Apache + SparkR `__ + applications on YARN. + + Attributes: + main_r_file_uri (str): + Required. The HCFS URI of the main R file to + use as the driver. Must be a .R file. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be placed in + the working directory of each executor. Useful + for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. + properties (Sequence[google.cloud.dataproc_v1beta2.types.SparkRJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure SparkR. Properties + that conflict with values set by the Dataproc + API may be overwritten. 
Can include properties + set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_r_file_uri = proto.Field( + proto.STRING, + number=1, + ) + args = proto.RepeatedField( + proto.STRING, + number=2, + ) + file_uris = proto.RepeatedField( + proto.STRING, + number=3, + ) + archive_uris = proto.RepeatedField( + proto.STRING, + number=4, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=6, + message='LoggingConfig', + ) + + +class PrestoJob(proto.Message): + r"""A Dataproc job for running `Presto `__ + queries. **IMPORTANT**: The `Dataproc Presto Optional + Component `__ + must be enabled when the cluster is created to submit a Presto job + to the cluster. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains SQL + queries. + query_list (google.cloud.dataproc_v1beta2.types.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + output_format (str): + Optional. The format in which query output + will be displayed. See the Presto documentation + for supported output formats + client_tags (Sequence[str]): + Optional. Presto client tags to attach to + this query + properties (Sequence[google.cloud.dataproc_v1beta2.types.PrestoJob.PropertiesEntry]): + Optional. A mapping of property names to values. Used to set + Presto `session + properties `__ + Equivalent to using the --session flag in the Presto CLI + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + query_file_uri = proto.Field( + proto.STRING, + number=1, + oneof='queries', + ) + query_list = proto.Field( + proto.MESSAGE, + number=2, + oneof='queries', + message='QueryList', + ) + continue_on_failure = proto.Field( + proto.BOOL, + number=3, + ) + output_format = proto.Field( + proto.STRING, + number=4, + ) + client_tags = proto.RepeatedField( + proto.STRING, + number=5, + ) + properties = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + logging_config = proto.Field( + proto.MESSAGE, + number=7, + message='LoggingConfig', + ) + + +class JobPlacement(proto.Message): + r"""Dataproc job config. + Attributes: + cluster_name (str): + Required. The name of the cluster where the + job will be submitted. + cluster_uuid (str): + Output only. A cluster UUID generated by the + Dataproc service when the job is submitted. + """ + + cluster_name = proto.Field( + proto.STRING, + number=1, + ) + cluster_uuid = proto.Field( + proto.STRING, + number=2, + ) + + +class JobStatus(proto.Message): + r"""Dataproc job status. + Attributes: + state (google.cloud.dataproc_v1beta2.types.JobStatus.State): + Output only. A state message specifying the + overall job state. + details (str): + Output only. Optional Job state details, such + as an error description if the state is + ERROR. + state_start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when this state was + entered. + substate (google.cloud.dataproc_v1beta2.types.JobStatus.Substate): + Output only. Additional state information, + which includes status reported by the agent. 
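+ 
+ For illustration, given a ``Job`` instance named ``job``, a caller
+ polling for completion might treat the following states as terminal
+ (a sketch, not an exhaustive classification)::
+ 
+     TERMINAL_STATES = {
+         JobStatus.State.DONE,
+         JobStatus.State.ERROR,
+         JobStatus.State.CANCELLED,
+     }
+     is_finished = job.status.state in TERMINAL_STATES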
+ """ + class State(proto.Enum): + r"""The job state.""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + SETUP_DONE = 8 + RUNNING = 2 + CANCEL_PENDING = 3 + CANCEL_STARTED = 7 + CANCELLED = 4 + DONE = 5 + ERROR = 6 + ATTEMPT_FAILURE = 9 + + class Substate(proto.Enum): + r"""The job substate.""" + UNSPECIFIED = 0 + SUBMITTED = 1 + QUEUED = 2 + STALE_STATUS = 3 + + state = proto.Field( + proto.ENUM, + number=1, + enum=State, + ) + details = proto.Field( + proto.STRING, + number=2, + ) + state_start_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + substate = proto.Field( + proto.ENUM, + number=7, + enum=Substate, + ) + + +class JobReference(proto.Message): + r"""Encapsulates the full scoping used to reference a job. + Attributes: + project_id (str): + Optional. The ID of the Google Cloud Platform + project that the job belongs to. If specified, + must match the request project ID. + job_id (str): + Optional. The job ID, which must be unique within the + project. The ID must contain only letters (a-z, A-Z), + numbers (0-9), underscores (_), or hyphens (-). The maximum + length is 100 characters. + + If not specified by the caller, the job ID will be provided + by the server. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + job_id = proto.Field( + proto.STRING, + number=2, + ) + + +class YarnApplication(proto.Message): + r"""A YARN application created by a job. Application information is a + subset of + org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. + + **Beta Feature**: This report is available for testing purposes + only. It may be changed before final release. + + Attributes: + name (str): + Output only. The application name. + state (google.cloud.dataproc_v1beta2.types.YarnApplication.State): + Output only. The application state. + progress (float): + Output only. The numerical progress of the + application, from 1 to 100. + tracking_url (str): + Output only. The HTTP URL of the + ApplicationMaster, HistoryServer, or + TimelineServer that provides application- + specific information. The URL uses the internal + hostname, and requires a proxy server for + resolution and, possibly, access. + """ + class State(proto.Enum): + r"""The application state, corresponding to + YarnProtos.YarnApplicationStateProto. + """ + STATE_UNSPECIFIED = 0 + NEW = 1 + NEW_SAVING = 2 + SUBMITTED = 3 + ACCEPTED = 4 + RUNNING = 5 + FINISHED = 6 + FAILED = 7 + KILLED = 8 + + name = proto.Field( + proto.STRING, + number=1, + ) + state = proto.Field( + proto.ENUM, + number=2, + enum=State, + ) + progress = proto.Field( + proto.FLOAT, + number=3, + ) + tracking_url = proto.Field( + proto.STRING, + number=4, + ) + + +class Job(proto.Message): + r"""A Dataproc job resource. + Attributes: + reference (google.cloud.dataproc_v1beta2.types.JobReference): + Optional. The fully qualified reference to the job, which + can be used to obtain the equivalent REST path of the job + resource. If this property is not specified when a job is + created, the server generates a job_id. + placement (google.cloud.dataproc_v1beta2.types.JobPlacement): + Required. Job information, including how, + when, and where to run the job. + hadoop_job (google.cloud.dataproc_v1beta2.types.HadoopJob): + Optional. Job is a Hadoop job. + spark_job (google.cloud.dataproc_v1beta2.types.SparkJob): + Optional. Job is a Spark job. + pyspark_job (google.cloud.dataproc_v1beta2.types.PySparkJob): + Optional. Job is a PySpark job. 
+ hive_job (google.cloud.dataproc_v1beta2.types.HiveJob): + Optional. Job is a Hive job. + pig_job (google.cloud.dataproc_v1beta2.types.PigJob): + Optional. Job is a Pig job. + spark_r_job (google.cloud.dataproc_v1beta2.types.SparkRJob): + Optional. Job is a SparkR job. + spark_sql_job (google.cloud.dataproc_v1beta2.types.SparkSqlJob): + Optional. Job is a SparkSql job. + presto_job (google.cloud.dataproc_v1beta2.types.PrestoJob): + Optional. Job is a Presto job. + status (google.cloud.dataproc_v1beta2.types.JobStatus): + Output only. The job status. Additional application-specific + status information may be contained in the type_job and + yarn_applications fields. + status_history (Sequence[google.cloud.dataproc_v1beta2.types.JobStatus]): + Output only. The previous job status. + yarn_applications (Sequence[google.cloud.dataproc_v1beta2.types.YarnApplication]): + Output only. The collection of YARN applications spun up by + this job. + + **Beta** Feature: This report is available for testing + purposes only. It may be changed before final release. + submitted_by (str): + Output only. The email address of the user + submitting the job. For jobs submitted on the + cluster, the address is + username@hostname. + driver_output_resource_uri (str): + Output only. A URI pointing to the location + of the stdout of the job's driver program. + driver_control_files_uri (str): + Output only. If present, the location of miscellaneous + control files which may be used as part of job setup and + handling. If not present, control files may be placed in the + same location as ``driver_output_uri``. + labels (Sequence[google.cloud.dataproc_v1beta2.types.Job.LabelsEntry]): + Optional. The labels to associate with this job. Label + **keys** must contain 1 to 63 characters, and must conform + to `RFC 1035 `__. + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. No more than + 32 labels can be associated with a job. + scheduling (google.cloud.dataproc_v1beta2.types.JobScheduling): + Optional. Job scheduling configuration. + job_uuid (str): + Output only. A UUID that uniquely identifies a job within + the project over time. This is in contrast to a + user-settable reference.job_id that may be reused over time. + done (bool): + Output only. Indicates whether the job is completed. If the + value is ``false``, the job is still in progress. If + ``true``, the job is completed, and ``status.state`` field + will indicate if it was successful, failed, or cancelled. 
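+ 
+ A minimal illustrative sketch of a PySpark job resource; the cluster
+ name and ``gs://`` URI are placeholders. Such a message can be passed
+ to ``JobControllerClient.submit_job``, which is generated elsewhere
+ in this change::
+ 
+     job = Job(
+         placement=JobPlacement(cluster_name='my-cluster'),
+         pyspark_job=PySparkJob(main_python_file_uri='gs://my-bucket/word_count.py'),
+     )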
+ """ + + reference = proto.Field( + proto.MESSAGE, + number=1, + message='JobReference', + ) + placement = proto.Field( + proto.MESSAGE, + number=2, + message='JobPlacement', + ) + hadoop_job = proto.Field( + proto.MESSAGE, + number=3, + oneof='type_job', + message='HadoopJob', + ) + spark_job = proto.Field( + proto.MESSAGE, + number=4, + oneof='type_job', + message='SparkJob', + ) + pyspark_job = proto.Field( + proto.MESSAGE, + number=5, + oneof='type_job', + message='PySparkJob', + ) + hive_job = proto.Field( + proto.MESSAGE, + number=6, + oneof='type_job', + message='HiveJob', + ) + pig_job = proto.Field( + proto.MESSAGE, + number=7, + oneof='type_job', + message='PigJob', + ) + spark_r_job = proto.Field( + proto.MESSAGE, + number=21, + oneof='type_job', + message='SparkRJob', + ) + spark_sql_job = proto.Field( + proto.MESSAGE, + number=12, + oneof='type_job', + message='SparkSqlJob', + ) + presto_job = proto.Field( + proto.MESSAGE, + number=23, + oneof='type_job', + message='PrestoJob', + ) + status = proto.Field( + proto.MESSAGE, + number=8, + message='JobStatus', + ) + status_history = proto.RepeatedField( + proto.MESSAGE, + number=13, + message='JobStatus', + ) + yarn_applications = proto.RepeatedField( + proto.MESSAGE, + number=9, + message='YarnApplication', + ) + submitted_by = proto.Field( + proto.STRING, + number=10, + ) + driver_output_resource_uri = proto.Field( + proto.STRING, + number=17, + ) + driver_control_files_uri = proto.Field( + proto.STRING, + number=15, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=18, + ) + scheduling = proto.Field( + proto.MESSAGE, + number=20, + message='JobScheduling', + ) + job_uuid = proto.Field( + proto.STRING, + number=22, + ) + done = proto.Field( + proto.BOOL, + number=24, + ) + + +class JobScheduling(proto.Message): + r"""Job scheduling options. + Attributes: + max_failures_per_hour (int): + Optional. Maximum number of times per hour a + driver may be restarted as a result of driver + terminating with non-zero code before job is + reported failed. + + A job may be reported as thrashing if driver + exits with non-zero code 4 times within 10 + minute window. + + Maximum value is 10. + """ + + max_failures_per_hour = proto.Field( + proto.INT32, + number=1, + ) + + +class JobMetadata(proto.Message): + r"""Job Operation metadata. + Attributes: + job_id (str): + Output only. The job id. + status (google.cloud.dataproc_v1beta2.types.JobStatus): + Output only. Most recent job status. + operation_type (str): + Output only. Operation type. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Job submission time. + """ + + job_id = proto.Field( + proto.STRING, + number=1, + ) + status = proto.Field( + proto.MESSAGE, + number=2, + message='JobStatus', + ) + operation_type = proto.Field( + proto.STRING, + number=3, + ) + start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class SubmitJobRequest(proto.Message): + r"""A request to submit a job. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job (google.cloud.dataproc_v1beta2.types.Job): + Required. The job resource. + request_id (str): + Optional. A unique id used to identify the request. 
If the + server receives two + [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] + requests with the same id, then the second request will be + ignored and the first + [Job][google.cloud.dataproc.v1beta2.Job] created and stored + in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + job = proto.Field( + proto.MESSAGE, + number=2, + message='Job', + ) + request_id = proto.Field( + proto.STRING, + number=4, + ) + + +class GetJobRequest(proto.Message): + r"""A request to get the resource representation for a job in a + project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + job_id = proto.Field( + proto.STRING, + number=2, + ) + + +class ListJobsRequest(proto.Message): + r"""A request to list jobs in a project. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + page_size (int): + Optional. The number of results to return in + each response. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + cluster_name (str): + Optional. If set, the returned jobs list + includes only jobs that were submitted to the + named cluster. + job_state_matcher (google.cloud.dataproc_v1beta2.types.ListJobsRequest.JobStateMatcher): + Optional. Specifies enumerated categories of jobs to list. + (default = match ALL jobs). + + If ``filter`` is provided, ``jobStateMatcher`` will be + ignored. + filter (str): + Optional. A filter constraining the jobs to list. Filters + are case-sensitive and have the following syntax: + + [field = value] AND [field [= value]] ... + + where **field** is ``status.state`` or ``labels.[KEY]``, and + ``[KEY]`` is a label key. **value** can be ``*`` to match + all values. ``status.state`` can be either ``ACTIVE`` or + ``NON_ACTIVE``. Only the logical ``AND`` operator is + supported; space-separated items are treated as having an + implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + """ + class JobStateMatcher(proto.Enum): + r"""A matcher that specifies categories of job states.""" + ALL = 0 + ACTIVE = 1 + NON_ACTIVE = 2 + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=6, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + cluster_name = proto.Field( + proto.STRING, + number=4, + ) + job_state_matcher = proto.Field( + proto.ENUM, + number=5, + enum=JobStateMatcher, + ) + filter = proto.Field( + proto.STRING, + number=7, + ) + + +class UpdateJobRequest(proto.Message): + r"""A request to update a job. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. 
The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + job (google.cloud.dataproc_v1beta2.types.Job): + Required. The changes to the job. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Specifies the path, relative to Job, of the field + to update. For example, to update the labels of a Job the + update_mask parameter would be specified as labels, and the + ``PATCH`` request body would specify the new value. Note: + Currently, labels is the only field that can be updated. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=2, + ) + job_id = proto.Field( + proto.STRING, + number=3, + ) + job = proto.Field( + proto.MESSAGE, + number=4, + message='Job', + ) + update_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListJobsResponse(proto.Message): + r"""A list of jobs in a project. + Attributes: + jobs (Sequence[google.cloud.dataproc_v1beta2.types.Job]): + Output only. Jobs list. + next_page_token (str): + Optional. This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the ``page_token`` in a subsequent + ListJobsRequest. + """ + + @property + def raw_page(self): + return self + + jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Job', + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class CancelJobRequest(proto.Message): + r"""A request to cancel a job. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + job_id = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteJobRequest(proto.Message): + r"""A request to delete a job. + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + region = proto.Field( + proto.STRING, + number=3, + ) + job_id = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/operations.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/operations.py new file mode 100644 index 00000000..3d2d2471 --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/operations.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1beta2', + manifest={ + 'ClusterOperationStatus', + 'ClusterOperationMetadata', + }, +) + + +class ClusterOperationStatus(proto.Message): + r"""The status of the operation. + Attributes: + state (google.cloud.dataproc_v1beta2.types.ClusterOperationStatus.State): + Output only. A message containing the + operation state. + inner_state (str): + Output only. A message containing the + detailed operation state. + details (str): + Output only. A message containing any + operation metadata details. + state_start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time this state was entered. + """ + class State(proto.Enum): + r"""The operation state.""" + UNKNOWN = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + + state = proto.Field( + proto.ENUM, + number=1, + enum=State, + ) + inner_state = proto.Field( + proto.STRING, + number=2, + ) + details = proto.Field( + proto.STRING, + number=3, + ) + state_start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class ClusterOperationMetadata(proto.Message): + r"""Metadata describing the operation. + Attributes: + cluster_name (str): + Output only. Name of the cluster for the + operation. + cluster_uuid (str): + Output only. Cluster UUID for the operation. + status (google.cloud.dataproc_v1beta2.types.ClusterOperationStatus): + Output only. Current operation status. + status_history (Sequence[google.cloud.dataproc_v1beta2.types.ClusterOperationStatus]): + Output only. The previous operation status. + operation_type (str): + Output only. The operation type. + description (str): + Output only. Short description of operation. + labels (Sequence[google.cloud.dataproc_v1beta2.types.ClusterOperationMetadata.LabelsEntry]): + Output only. Labels associated with the + operation + warnings (Sequence[str]): + Output only. Errors encountered during + operation execution. + """ + + cluster_name = proto.Field( + proto.STRING, + number=7, + ) + cluster_uuid = proto.Field( + proto.STRING, + number=8, + ) + status = proto.Field( + proto.MESSAGE, + number=9, + message='ClusterOperationStatus', + ) + status_history = proto.RepeatedField( + proto.MESSAGE, + number=10, + message='ClusterOperationStatus', + ) + operation_type = proto.Field( + proto.STRING, + number=11, + ) + description = proto.Field( + proto.STRING, + number=12, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=13, + ) + warnings = proto.RepeatedField( + proto.STRING, + number=14, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/shared.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/shared.py new file mode 100644 index 00000000..2a9671ba --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/shared.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1beta2', + manifest={ + 'Component', + }, +) + + +class Component(proto.Enum): + r"""Cluster components that can be activated.""" + COMPONENT_UNSPECIFIED = 0 + ANACONDA = 5 + DRUID = 9 + HBASE = 11 + HIVE_WEBHCAT = 3 + JUPYTER = 1 + KERBEROS = 7 + PRESTO = 6 + RANGER = 12 + SOLR = 10 + ZEPPELIN = 4 + ZOOKEEPER = 8 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/workflow_templates.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/workflow_templates.py new file mode 100644 index 00000000..b159829e --- /dev/null +++ b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/workflow_templates.py @@ -0,0 +1,1073 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.dataproc_v1beta2.types import clusters +from google.cloud.dataproc_v1beta2.types import jobs as gcd_jobs +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.dataproc.v1beta2', + manifest={ + 'WorkflowTemplate', + 'WorkflowTemplatePlacement', + 'ManagedCluster', + 'ClusterSelector', + 'OrderedJob', + 'TemplateParameter', + 'ParameterValidation', + 'RegexValidation', + 'ValueValidation', + 'WorkflowMetadata', + 'ClusterOperation', + 'WorkflowGraph', + 'WorkflowNode', + 'CreateWorkflowTemplateRequest', + 'GetWorkflowTemplateRequest', + 'InstantiateWorkflowTemplateRequest', + 'InstantiateInlineWorkflowTemplateRequest', + 'UpdateWorkflowTemplateRequest', + 'ListWorkflowTemplatesRequest', + 'ListWorkflowTemplatesResponse', + 'DeleteWorkflowTemplateRequest', + }, +) + + +class WorkflowTemplate(proto.Message): + r"""A Dataproc workflow template resource. + Attributes: + id (str): + Required. The template id. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of between 3 and 50 + characters. + + . + name (str): + Output only. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates``, the resource + name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. Used to perform a consistent read-modify-write. + + This field should be left blank for a + ``CreateWorkflowTemplate`` request. 
It is required for an + ``UpdateWorkflowTemplate`` request, and must match the + current server version. A typical update template flow would + fetch the current template with a ``GetWorkflowTemplate`` + request, which will return the current template with the + ``version`` field filled in with the current server version. + The user updates other fields in the template, then returns + it as part of the ``UpdateWorkflowTemplate`` request. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time template was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time template was last + updated. + labels (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowTemplate.LabelsEntry]): + Optional. The labels to associate with this template. These + labels will be propagated to all jobs and clusters created + by the workflow instance. + + Label **keys** must contain 1 to 63 characters, and must + conform to `RFC + 1035 `__. + + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. + + No more than 32 labels can be associated with a template. + placement (google.cloud.dataproc_v1beta2.types.WorkflowTemplatePlacement): + Required. WorkflowTemplate scheduling + information. + jobs (Sequence[google.cloud.dataproc_v1beta2.types.OrderedJob]): + Required. The Directed Acyclic Graph of Jobs + to submit. + parameters (Sequence[google.cloud.dataproc_v1beta2.types.TemplateParameter]): + Optional. Template parameters whose values + are substituted into the template. Values for + parameters must be provided when the template is + instantiated. + dag_timeout (google.protobuf.duration_pb2.Duration): + Optional. Timeout duration for the DAG of jobs. You can use + "s", "m", "h", and "d" suffixes for second, minute, hour, + and day duration values, respectively. The timeout duration + must be from 10 minutes ("10m") to 24 hours ("24h" or "1d"). + The timer begins when the first job is submitted. If the + workflow is running at the end of the timeout period, any + remaining jobs are cancelled, the workflow is terminated, + and if the workflow was running on a `managed + cluster `__, + the cluster is deleted. + """ + + id = proto.Field( + proto.STRING, + number=2, + ) + name = proto.Field( + proto.STRING, + number=1, + ) + version = proto.Field( + proto.INT32, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + placement = proto.Field( + proto.MESSAGE, + number=7, + message='WorkflowTemplatePlacement', + ) + jobs = proto.RepeatedField( + proto.MESSAGE, + number=8, + message='OrderedJob', + ) + parameters = proto.RepeatedField( + proto.MESSAGE, + number=9, + message='TemplateParameter', + ) + dag_timeout = proto.Field( + proto.MESSAGE, + number=10, + message=duration_pb2.Duration, + ) + + +class WorkflowTemplatePlacement(proto.Message): + r"""Specifies workflow execution target. + + Either ``managed_cluster`` or ``cluster_selector`` is required. + + Attributes: + managed_cluster (google.cloud.dataproc_v1beta2.types.ManagedCluster): + Optional. A cluster that is managed by the + workflow. + cluster_selector (google.cloud.dataproc_v1beta2.types.ClusterSelector): + Optional. A selector that chooses target + cluster for jobs based on metadata. 
+ + The selector is evaluated at the time each job + is submitted. + """ + + managed_cluster = proto.Field( + proto.MESSAGE, + number=1, + oneof='placement', + message='ManagedCluster', + ) + cluster_selector = proto.Field( + proto.MESSAGE, + number=2, + oneof='placement', + message='ClusterSelector', + ) + + +class ManagedCluster(proto.Message): + r"""Cluster that is managed by the workflow. + Attributes: + cluster_name (str): + Required. The cluster name prefix. A unique + cluster name will be formed by appending a + random suffix. + The name must contain only lower-case letters + (a-z), numbers (0-9), and hyphens (-). Must + begin with a letter. Cannot begin or end with + hyphen. Must consist of between 2 and 35 + characters. + config (google.cloud.dataproc_v1beta2.types.ClusterConfig): + Required. The cluster configuration. + labels (Sequence[google.cloud.dataproc_v1beta2.types.ManagedCluster.LabelsEntry]): + Optional. The labels to associate with this cluster. + + Label keys must be between 1 and 63 characters long, and + must conform to the following PCRE regular expression: + [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values must be between 1 and 63 characters long, and + must conform to the following PCRE regular expression: + [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 32 labels can be associated with a given + cluster. + """ + + cluster_name = proto.Field( + proto.STRING, + number=2, + ) + config = proto.Field( + proto.MESSAGE, + number=3, + message=clusters.ClusterConfig, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + + +class ClusterSelector(proto.Message): + r"""A selector that chooses target cluster for jobs based on + metadata. + + Attributes: + zone (str): + Optional. The zone where workflow process + executes. This parameter does not affect the + selection of the cluster. + If unspecified, the zone of the first cluster + matching the selector is used. + cluster_labels (Sequence[google.cloud.dataproc_v1beta2.types.ClusterSelector.ClusterLabelsEntry]): + Required. The cluster labels. Cluster must + have all labels to match. + """ + + zone = proto.Field( + proto.STRING, + number=1, + ) + cluster_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + + +class OrderedJob(proto.Message): + r"""A job executed by the workflow. + Attributes: + step_id (str): + Required. The step id. The id must be unique among all jobs + within the template. + + The step id is used as prefix for job id, as job + ``goog-dataproc-workflow-step-id`` label, and in + [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + field from other steps. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of between 3 and 50 + characters. + hadoop_job (google.cloud.dataproc_v1beta2.types.HadoopJob): + Optional. Job is a Hadoop job. + spark_job (google.cloud.dataproc_v1beta2.types.SparkJob): + Optional. Job is a Spark job. + pyspark_job (google.cloud.dataproc_v1beta2.types.PySparkJob): + Optional. Job is a PySpark job. + hive_job (google.cloud.dataproc_v1beta2.types.HiveJob): + Optional. Job is a Hive job. + pig_job (google.cloud.dataproc_v1beta2.types.PigJob): + Optional. Job is a Pig job. + spark_r_job (google.cloud.dataproc_v1beta2.types.SparkRJob): + Optional. Job is a SparkR job. + spark_sql_job (google.cloud.dataproc_v1beta2.types.SparkSqlJob): + Optional. Job is a SparkSql job. 
+ presto_job (google.cloud.dataproc_v1beta2.types.PrestoJob): + Optional. Job is a Presto job. + labels (Sequence[google.cloud.dataproc_v1beta2.types.OrderedJob.LabelsEntry]): + Optional. The labels to associate with this job. + + Label keys must be between 1 and 63 characters long, and + must conform to the following regular expression: + [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values must be between 1 and 63 characters long, and + must conform to the following regular expression: + [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 32 labels can be associated with a given job. + scheduling (google.cloud.dataproc_v1beta2.types.JobScheduling): + Optional. Job scheduling configuration. + prerequisite_step_ids (Sequence[str]): + Optional. The optional list of prerequisite job step_ids. If + not specified, the job will start at the beginning of + workflow. + """ + + step_id = proto.Field( + proto.STRING, + number=1, + ) + hadoop_job = proto.Field( + proto.MESSAGE, + number=2, + oneof='job_type', + message=gcd_jobs.HadoopJob, + ) + spark_job = proto.Field( + proto.MESSAGE, + number=3, + oneof='job_type', + message=gcd_jobs.SparkJob, + ) + pyspark_job = proto.Field( + proto.MESSAGE, + number=4, + oneof='job_type', + message=gcd_jobs.PySparkJob, + ) + hive_job = proto.Field( + proto.MESSAGE, + number=5, + oneof='job_type', + message=gcd_jobs.HiveJob, + ) + pig_job = proto.Field( + proto.MESSAGE, + number=6, + oneof='job_type', + message=gcd_jobs.PigJob, + ) + spark_r_job = proto.Field( + proto.MESSAGE, + number=11, + oneof='job_type', + message=gcd_jobs.SparkRJob, + ) + spark_sql_job = proto.Field( + proto.MESSAGE, + number=7, + oneof='job_type', + message=gcd_jobs.SparkSqlJob, + ) + presto_job = proto.Field( + proto.MESSAGE, + number=12, + oneof='job_type', + message=gcd_jobs.PrestoJob, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + scheduling = proto.Field( + proto.MESSAGE, + number=9, + message=gcd_jobs.JobScheduling, + ) + prerequisite_step_ids = proto.RepeatedField( + proto.STRING, + number=10, + ) + + +class TemplateParameter(proto.Message): + r"""A configurable parameter that replaces one or more fields in + the template. Parameterizable fields: + - Labels + - File uris + - Job properties + - Job arguments + - Script variables + - Main class (in HadoopJob and SparkJob) + - Zone (in ClusterSelector) + + Attributes: + name (str): + Required. Parameter name. The parameter name is used as the + key, and paired with the parameter value, which are passed + to the template when the template is instantiated. The name + must contain only capital letters (A-Z), numbers (0-9), and + underscores (_), and must not start with a number. The + maximum length is 40 characters. + fields (Sequence[str]): + Required. Paths to all fields that the parameter replaces. A + field is allowed to appear in at most one parameter's list + of field paths. + + A field path is similar in syntax to a + [google.protobuf.FieldMask][google.protobuf.FieldMask]. For + example, a field path that references the zone field of a + workflow template's cluster selector would be specified as + ``placement.clusterSelector.zone``. 
+ + Also, field paths can reference fields using the following + syntax: + + - Values in maps can be referenced by key: + + - labels['key'] + - placement.clusterSelector.clusterLabels['key'] + - placement.managedCluster.labels['key'] + - placement.clusterSelector.clusterLabels['key'] + - jobs['step-id'].labels['key'] + + - Jobs in the jobs list can be referenced by step-id: + + - jobs['step-id'].hadoopJob.mainJarFileUri + - jobs['step-id'].hiveJob.queryFileUri + - jobs['step-id'].pySparkJob.mainPythonFileUri + - jobs['step-id'].hadoopJob.jarFileUris[0] + - jobs['step-id'].hadoopJob.archiveUris[0] + - jobs['step-id'].hadoopJob.fileUris[0] + - jobs['step-id'].pySparkJob.pythonFileUris[0] + + - Items in repeated fields can be referenced by a + zero-based index: + + - jobs['step-id'].sparkJob.args[0] + + - Other examples: + + - jobs['step-id'].hadoopJob.properties['key'] + - jobs['step-id'].hadoopJob.args[0] + - jobs['step-id'].hiveJob.scriptVariables['key'] + - jobs['step-id'].hadoopJob.mainJarFileUri + - placement.clusterSelector.zone + + It may not be possible to parameterize maps and repeated + fields in their entirety since only individual map values + and individual items in repeated fields can be referenced. + For example, the following field paths are invalid: + + - placement.clusterSelector.clusterLabels + - jobs['step-id'].sparkJob.args + description (str): + Optional. Brief description of the parameter. + Must not exceed 1024 characters. + validation (google.cloud.dataproc_v1beta2.types.ParameterValidation): + Optional. Validation rules to be applied to + this parameter's value. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + fields = proto.RepeatedField( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + validation = proto.Field( + proto.MESSAGE, + number=4, + message='ParameterValidation', + ) + + +class ParameterValidation(proto.Message): + r"""Configuration for parameter validation. + Attributes: + regex (google.cloud.dataproc_v1beta2.types.RegexValidation): + Validation based on regular expressions. + values (google.cloud.dataproc_v1beta2.types.ValueValidation): + Validation based on a list of allowed values. + """ + + regex = proto.Field( + proto.MESSAGE, + number=1, + oneof='validation_type', + message='RegexValidation', + ) + values = proto.Field( + proto.MESSAGE, + number=2, + oneof='validation_type', + message='ValueValidation', + ) + + +class RegexValidation(proto.Message): + r"""Validation based on regular expressions. + Attributes: + regexes (Sequence[str]): + Required. RE2 regular expressions used to + validate the parameter's value. The value must + match the regex in its entirety (substring + matches are not sufficient). + """ + + regexes = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class ValueValidation(proto.Message): + r"""Validation based on a list of allowed values. + Attributes: + values (Sequence[str]): + Required. List of allowed values for the + parameter. + """ + + values = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class WorkflowMetadata(proto.Message): + r"""A Dataproc workflow template resource. + Attributes: + template (str): + Output only. The resource name of the workflow template as + described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.workflowTemplates``, the resource + name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Output only. The version of template at the + time of workflow instantiation. + create_cluster (google.cloud.dataproc_v1beta2.types.ClusterOperation): + Output only. The create cluster operation + metadata. + graph (google.cloud.dataproc_v1beta2.types.WorkflowGraph): + Output only. The workflow graph. + delete_cluster (google.cloud.dataproc_v1beta2.types.ClusterOperation): + Output only. The delete cluster operation + metadata. + state (google.cloud.dataproc_v1beta2.types.WorkflowMetadata.State): + Output only. The workflow state. + cluster_name (str): + Output only. The name of the target cluster. + parameters (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowMetadata.ParametersEntry]): + Map from parameter names to values that were + used for those parameters. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Workflow start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Workflow end time. + cluster_uuid (str): + Output only. The UUID of target cluster. + dag_timeout (google.protobuf.duration_pb2.Duration): + Output only. The timeout duration for the DAG of jobs. + Minimum timeout duration is 10 minutes and maximum is 24 + hours, expressed as a + [google.protobuf.Duration][https://developers.google.com/protocol-buffers/docs/proto3#json_mapping]. + For example, "1800" = 1800 seconds/30 minutes duration. + dag_start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. DAG start time, which is only set for workflows + with + [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout] + when the DAG begins. + dag_end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. DAG end time, which is only set for workflows + with + [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout] + when the DAG ends. 
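As a hedged sketch (not part of the generated patch): this output-only metadata typically reaches callers on the long-running operation returned by an instantiate call, and the ``google.api_core`` operation wrapper exposes the decoded message. The client setup, polling behavior, and resource name below are assumptions for illustration only.

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.WorkflowTemplateServiceClient()
    operation = client.instantiate_workflow_template(
        request={"name": "projects/my-project/regions/us-central1/workflowTemplates/my-template"}
    )

    # `metadata` reflects the most recently seen operation state; it may be
    # empty until the operation has been polled at least once.
    meta = operation.metadata
    if meta is not None:
        print(meta.state, meta.cluster_name, meta.dag_timeout)

    operation.result()  # wait for the workflow (and any managed cluster) to finish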
+ """ + class State(proto.Enum): + r"""The operation state.""" + UNKNOWN = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + + template = proto.Field( + proto.STRING, + number=1, + ) + version = proto.Field( + proto.INT32, + number=2, + ) + create_cluster = proto.Field( + proto.MESSAGE, + number=3, + message='ClusterOperation', + ) + graph = proto.Field( + proto.MESSAGE, + number=4, + message='WorkflowGraph', + ) + delete_cluster = proto.Field( + proto.MESSAGE, + number=5, + message='ClusterOperation', + ) + state = proto.Field( + proto.ENUM, + number=6, + enum=State, + ) + cluster_name = proto.Field( + proto.STRING, + number=7, + ) + parameters = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + start_time = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + cluster_uuid = proto.Field( + proto.STRING, + number=11, + ) + dag_timeout = proto.Field( + proto.MESSAGE, + number=12, + message=duration_pb2.Duration, + ) + dag_start_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + dag_end_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + + +class ClusterOperation(proto.Message): + r"""The cluster operation triggered by a workflow. + Attributes: + operation_id (str): + Output only. The id of the cluster operation. + error (str): + Output only. Error, if operation failed. + done (bool): + Output only. Indicates the operation is done. + """ + + operation_id = proto.Field( + proto.STRING, + number=1, + ) + error = proto.Field( + proto.STRING, + number=2, + ) + done = proto.Field( + proto.BOOL, + number=3, + ) + + +class WorkflowGraph(proto.Message): + r"""The workflow graph. + Attributes: + nodes (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowNode]): + Output only. The workflow nodes. + """ + + nodes = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='WorkflowNode', + ) + + +class WorkflowNode(proto.Message): + r"""The workflow node. + Attributes: + step_id (str): + Output only. The name of the node. + prerequisite_step_ids (Sequence[str]): + Output only. Node's prerequisite nodes. + job_id (str): + Output only. The job id; populated after the + node enters RUNNING state. + state (google.cloud.dataproc_v1beta2.types.WorkflowNode.NodeState): + Output only. The node state. + error (str): + Output only. The error detail. + """ + class NodeState(proto.Enum): + r"""The workflow node state.""" + NODE_STATUS_UNSPECIFIED = 0 + BLOCKED = 1 + RUNNABLE = 2 + RUNNING = 3 + COMPLETED = 4 + FAILED = 5 + + step_id = proto.Field( + proto.STRING, + number=1, + ) + prerequisite_step_ids = proto.RepeatedField( + proto.STRING, + number=2, + ) + job_id = proto.Field( + proto.STRING, + number=3, + ) + state = proto.Field( + proto.ENUM, + number=5, + enum=NodeState, + ) + error = proto.Field( + proto.STRING, + number=6, + ) + + +class CreateWorkflowTemplateRequest(proto.Message): + r"""A request to create a workflow template. + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.workflowTemplates,create``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): + Required. The Dataproc workflow template to + create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + template = proto.Field( + proto.MESSAGE, + number=2, + message='WorkflowTemplate', + ) + + +class GetWorkflowTemplateRequest(proto.Message): + r"""A request to fetch a workflow template. + Attributes: + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + retrieve. Only previously instantiated versions + can be retrieved. + If unspecified, retrieves the current version. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + version = proto.Field( + proto.INT32, + number=2, + ) + + +class InstantiateWorkflowTemplateRequest(proto.Message): + r"""A request to instantiate a workflow template. + Attributes: + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + instantiate. If specified, the workflow will be + instantiated only if the current version of the + workflow template has the supplied version. + This option cannot be used to instantiate a + previous version of workflow template. + instance_id (str): + Deprecated. Please use ``request_id`` field instead. + request_id (str): + Optional. A tag that prevents multiple concurrent workflow + instances with the same tag from running. This mitigates + risk of concurrent instances started due to retries. + + It is recommended to always set this value to a + `UUID `__. + + The tag must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + parameters (Sequence[google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): + Optional. Map from parameter names to values + that should be used for those parameters. Values + may not exceed 100 characters. 
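A minimal, hedged sketch of building such a request with a retry-safe ``request_id`` and a parameters map; the resource name, parameter key, and application-default-credential setup are placeholders, not values taken from this patch.

    import uuid

    from google.cloud import dataproc_v1beta2

    request = dataproc_v1beta2.InstantiateWorkflowTemplateRequest(
        name="projects/my-project/regions/us-central1/workflowTemplates/my-template",
        # A UUID string is 36 characters, comfortably under the 40-character limit above,
        # and lets the service drop duplicate requests caused by retries.
        request_id=str(uuid.uuid4()),
        parameters={"CLUSTER_ZONE": "us-central1-a"},
    )
    client = dataproc_v1beta2.WorkflowTemplateServiceClient()
    operation = client.instantiate_workflow_template(request=request)
    operation.result()  # block until the workflow completes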
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + version = proto.Field( + proto.INT32, + number=2, + ) + instance_id = proto.Field( + proto.STRING, + number=3, + ) + request_id = proto.Field( + proto.STRING, + number=5, + ) + parameters = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + + +class InstantiateInlineWorkflowTemplateRequest(proto.Message): + r"""A request to instantiate an inline workflow template. + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: ``projects/{project_id}/locations/{location}`` + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): + Required. The workflow template to + instantiate. + instance_id (str): + Deprecated. Please use ``request_id`` field instead. + request_id (str): + Optional. A tag that prevents multiple concurrent workflow + instances with the same tag from running. This mitigates + risk of concurrent instances started due to retries. + + It is recommended to always set this value to a + `UUID `__. + + The tag must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + template = proto.Field( + proto.MESSAGE, + number=2, + message='WorkflowTemplate', + ) + instance_id = proto.Field( + proto.STRING, + number=3, + ) + request_id = proto.Field( + proto.STRING, + number=4, + ) + + +class UpdateWorkflowTemplateRequest(proto.Message): + r"""A request to update a workflow template. + Attributes: + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + """ + + template = proto.Field( + proto.MESSAGE, + number=1, + message='WorkflowTemplate', + ) + + +class ListWorkflowTemplatesRequest(proto.Message): + r"""A request to list workflow templates in a project. + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + page_size (int): + Optional. The maximum number of results to + return in each response. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + + +class ListWorkflowTemplatesResponse(proto.Message): + r"""A response to a request to list workflow templates in a + project. + + Attributes: + templates (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): + Output only. WorkflowTemplates list. + next_page_token (str): + Output only. 
This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the page_token in a subsequent + ListWorkflowTemplatesRequest. + """ + + @property + def raw_page(self): + return self + + templates = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='WorkflowTemplate', + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteWorkflowTemplateRequest(proto.Message): + r"""A request to delete a workflow template. + Currently started workflows will remain running. + + Attributes: + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.delete``, the + resource name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + delete. If specified, will only delete the + template if the current server version matches + specified version. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + version = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/mypy.ini b/owl-bot-staging/v1beta2/mypy.ini new file mode 100644 index 00000000..4505b485 --- /dev/null +++ b/owl-bot-staging/v1beta2/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/owl-bot-staging/v1beta2/noxfile.py b/owl-bot-staging/v1beta2/noxfile.py new file mode 100644 index 00000000..95c232be --- /dev/null +++ b/owl-bot-staging/v1beta2/noxfile.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + + +nox.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds" + # exclude update_lower_bounds from default + "docs", +] + +@nox.session(python=['3.6', '3.7', '3.8', '3.9']) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/dataproc_v1beta2/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python='3.7') +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. + """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7']) +def mypy(session): + """Run the type checker.""" + session.install('mypy', 'types-pkg_resources') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.6') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/owl-bot-staging/v1beta2/scripts/fixup_dataproc_v1beta2_keywords.py b/owl-bot-staging/v1beta2/scripts/fixup_dataproc_v1beta2_keywords.py new file mode 100644 index 00000000..b9e52549 --- /dev/null +++ b/owl-bot-staging/v1beta2/scripts/fixup_dataproc_v1beta2_keywords.py @@ -0,0 +1,200 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class dataprocCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'cancel_job': ('project_id', 'region', 'job_id', ), + 'create_autoscaling_policy': ('parent', 'policy', ), + 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', ), + 'create_workflow_template': ('parent', 'template', ), + 'delete_autoscaling_policy': ('name', ), + 'delete_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), + 'delete_job': ('project_id', 'region', 'job_id', ), + 'delete_workflow_template': ('name', 'version', ), + 'diagnose_cluster': ('project_id', 'region', 'cluster_name', ), + 'get_autoscaling_policy': ('name', ), + 'get_cluster': ('project_id', 'region', 'cluster_name', ), + 'get_job': ('project_id', 'region', 'job_id', ), + 'get_workflow_template': ('name', 'version', ), + 'instantiate_inline_workflow_template': ('parent', 'template', 'instance_id', 'request_id', ), + 'instantiate_workflow_template': ('name', 'version', 'instance_id', 'request_id', 'parameters', ), + 'list_autoscaling_policies': ('parent', 'page_size', 'page_token', ), + 'list_clusters': ('project_id', 'region', 'filter', 'page_size', 'page_token', ), + 'list_jobs': ('project_id', 'region', 'page_size', 'page_token', 'cluster_name', 'job_state_matcher', 'filter', ), + 'list_workflow_templates': ('parent', 'page_size', 'page_token', ), + 'submit_job': ('project_id', 'region', 'job', 'request_id', ), + 'submit_job_as_operation': ('project_id', 'region', 'job', 'request_id', ), + 'update_autoscaling_policy': ('policy', ), + 'update_cluster': ('project_id', 'region', 'cluster_name', 'cluster', 'update_mask', 'graceful_decommission_timeout', 'request_id', ), + 'update_job': ('project_id', 'region', 'job_id', 'job', 'update_mask', ), + 'update_workflow_template': ('template', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=dataprocCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the dataproc client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
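To make the transformer's effect concrete, here is a before/after sketch of the call rewriting it performs, using the ``submit_job`` entry from the METHOD_TO_PARAMS table above; the ``client`` variable, project id, region, and ``job_config`` are placeholders, and the invocation comment assumes the CLI flags defined just below.

    # Invocation (illustrative):
    #   python fixup_dataproc_v1beta2_keywords.py \
    #       --input-directory ./old_samples --output-directory ./fixed_samples

    # Before fixup: positional arguments in the old calling convention.
    job = client.submit_job("my-project-id", "us-central1", job_config)

    # After fixup: the same call, un-flattened into a single `request` dict;
    # any retry/timeout/metadata keywords are preserved as trailing kwargs.
    job = client.submit_job(request={
        "project_id": "my-project-id",
        "region": "us-central1",
        "job": job_config,
    })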
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta2/setup.py b/owl-bot-staging/v1beta2/setup.py new file mode 100644 index 00000000..63f4596f --- /dev/null +++ b/owl-bot-staging/v1beta2/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-dataproc', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1beta2/tests/__init__.py b/owl-bot-staging/v1beta2/tests/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1beta2/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta2/tests/unit/__init__.py b/owl-bot-staging/v1beta2/tests/unit/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1beta2/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta2/tests/unit/gapic/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1beta2/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/__init__.py b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py new file mode 100644 index 00000000..435a442e --- /dev/null +++ b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py @@ -0,0 +1,2293 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import AutoscalingPolicyServiceAsyncClient +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import AutoscalingPolicyServiceClient +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import pagers +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import transports +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.dataproc_v1beta2.types import autoscaling_policies +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(None) is None + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + AutoscalingPolicyServiceClient, + AutoscalingPolicyServiceAsyncClient, +]) +def test_autoscaling_policy_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + AutoscalingPolicyServiceClient, + AutoscalingPolicyServiceAsyncClient, +]) +def test_autoscaling_policy_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), + (transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_autoscaling_policy_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + AutoscalingPolicyServiceClient, + AutoscalingPolicyServiceAsyncClient, +]) +def test_autoscaling_policy_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def 
test_autoscaling_policy_service_client_get_transport_class(): + transport = AutoscalingPolicyServiceClient.get_transport_class() + available_transports = [ + transports.AutoscalingPolicyServiceGrpcTransport, + ] + assert transport in available_transports + + transport = AutoscalingPolicyServiceClient.get_transport_class("grpc") + assert transport == transports.AutoscalingPolicyServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), + (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(AutoscalingPolicyServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceClient)) +@mock.patch.object(AutoscalingPolicyServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceAsyncClient)) +def test_autoscaling_policy_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(AutoscalingPolicyServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(AutoscalingPolicyServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc", "true"), + (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc", "false"), + (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(AutoscalingPolicyServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceClient)) +@mock.patch.object(AutoscalingPolicyServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_autoscaling_policy_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
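+    # (Per the assertions that follow: with GOOGLE_API_USE_MTLS_ENDPOINT="auto",
+    # DEFAULT_MTLS_ENDPOINT and the cert source are expected only when
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE="true" and a default client cert exists;
+    # otherwise DEFAULT_ENDPOINT is used with no client certificate.)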
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), + (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_autoscaling_policy_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), + (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_autoscaling_policy_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_autoscaling_policy_service_client_client_options_from_dict(): + with mock.patch('google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = AutoscalingPolicyServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.CreateAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), + ) + response = client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +def test_create_autoscaling_policy_from_dict(): + test_create_autoscaling_policy(request_type=dict) + + +def test_create_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + client.create_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.CreateAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + )) + response = await client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_async_from_dict(): + await test_create_autoscaling_policy_async(request_type=dict) + + +def test_create_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.CreateAutoscalingPolicyRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.CreateAutoscalingPolicyRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + await client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_autoscaling_policy( + parent='parent_value', + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') + + +def test_create_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_autoscaling_policy( + autoscaling_policies.CreateAutoscalingPolicyRequest(), + parent='parent_value', + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_autoscaling_policy( + parent='parent_value', + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_autoscaling_policy( + autoscaling_policies.CreateAutoscalingPolicyRequest(), + parent='parent_value', + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + +def test_update_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.UpdateAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), + ) + response = client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +def test_update_autoscaling_policy_from_dict(): + test_update_autoscaling_policy(request_type=dict) + + +def test_update_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + client.update_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.UpdateAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + )) + response = await client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_async_from_dict(): + await test_update_autoscaling_policy_async(request_type=dict) + + +def test_update_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.UpdateAutoscalingPolicyRequest() + + request.policy.name = 'policy.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'policy.name=policy.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.UpdateAutoscalingPolicyRequest() + + request.policy.name = 'policy.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + await client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'policy.name=policy.name/value', + ) in kw['metadata'] + + +def test_update_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_autoscaling_policy( + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') + + +def test_update_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_autoscaling_policy( + autoscaling_policies.UpdateAutoscalingPolicyRequest(), + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_autoscaling_policy( + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_autoscaling_policy( + autoscaling_policies.UpdateAutoscalingPolicyRequest(), + policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), + ) + + +def test_get_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.GetAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), + ) + response = client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +def test_get_autoscaling_policy_from_dict(): + test_get_autoscaling_policy(request_type=dict) + + +def test_get_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + client.get_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.GetAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( + id='id_value', + name='name_value', + )) + response = await client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + assert response.id == 'id_value' + assert response.name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_async_from_dict(): + await test_get_autoscaling_policy_async(request_type=dict) + + +def test_get_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.GetAutoscalingPolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.GetAutoscalingPolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + await client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_autoscaling_policy( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_autoscaling_policy( + autoscaling_policies.GetAutoscalingPolicyRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_autoscaling_policy( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_autoscaling_policy( + autoscaling_policies.GetAutoscalingPolicyRequest(), + name='name_value', + ) + + +def test_list_autoscaling_policies(transport: str = 'grpc', request_type=autoscaling_policies.ListAutoscalingPoliciesRequest): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAutoscalingPoliciesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_autoscaling_policies_from_dict(): + test_list_autoscaling_policies(request_type=dict) + + +def test_list_autoscaling_policies_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + client.list_autoscaling_policies() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.ListAutoscalingPoliciesRequest): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAutoscalingPoliciesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async_from_dict(): + await test_list_autoscaling_policies_async(request_type=dict) + + +def test_list_autoscaling_policies_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.ListAutoscalingPoliciesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() + client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.ListAutoscalingPoliciesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse()) + await client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_autoscaling_policies_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_autoscaling_policies( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_autoscaling_policies_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_autoscaling_policies( + autoscaling_policies.ListAutoscalingPoliciesRequest(), + parent='parent_value', + ) + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_autoscaling_policies( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_autoscaling_policies( + autoscaling_policies.ListAutoscalingPoliciesRequest(), + parent='parent_value', + ) + + +def test_list_autoscaling_policies_pager(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Set the response to a series of pages. 
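+        # (Four fake pages follow: 3, 0, 1, and 2 policies with next_page_token
+        # values 'abc', 'def', 'ghi', and '' respectively, so iterating the pager
+        # is expected to yield 6 AutoscalingPolicy items in total.)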
+ call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='abc', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], + next_page_token='def', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='ghi', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_autoscaling_policies(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, autoscaling_policies.AutoscalingPolicy) + for i in results) + +def test_list_autoscaling_policies_pages(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='abc', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], + next_page_token='def', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='ghi', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + pages = list(client.list_autoscaling_policies(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async_pager(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='abc', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], + next_page_token='def', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='ghi', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_autoscaling_policies(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, autoscaling_policies.AutoscalingPolicy) + for i in responses) + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async_pages(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_autoscaling_policies), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='abc', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], + next_page_token='def', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token='ghi', + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_autoscaling_policies(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.DeleteAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. 
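+    # (DeleteAutoscalingPolicy returns google.protobuf.Empty, which the client
+    # surfaces as None.)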
+ assert response is None + + +def test_delete_autoscaling_policy_from_dict(): + test_delete_autoscaling_policy(request_type=dict) + + +def test_delete_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + client.delete_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.DeleteAutoscalingPolicyRequest): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_async_from_dict(): + await test_delete_autoscaling_policy_async(request_type=dict) + + +def test_delete_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.DeleteAutoscalingPolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + call.return_value = None + client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.DeleteAutoscalingPolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_autoscaling_policy( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_autoscaling_policy( + autoscaling_policies.DeleteAutoscalingPolicyRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_autoscaling_policy( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_autoscaling_policy( + autoscaling_policies.DeleteAutoscalingPolicyRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = AutoscalingPolicyServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.AutoscalingPolicyServiceGrpcTransport, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.AutoscalingPolicyServiceGrpcTransport, + ) + +def test_autoscaling_policy_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.AutoscalingPolicyServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_autoscaling_policy_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.AutoscalingPolicyServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_autoscaling_policy', + 'update_autoscaling_policy', + 'get_autoscaling_policy', + 'list_autoscaling_policies', + 'delete_autoscaling_policy', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +@requires_google_auth_gte_1_25_0 +def test_autoscaling_policy_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoscalingPolicyServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_autoscaling_policy_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoscalingPolicyServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_autoscaling_policy_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoscalingPolicyServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_autoscaling_policy_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + AutoscalingPolicyServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_autoscaling_policy_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + AutoscalingPolicyServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AutoscalingPolicyServiceGrpcTransport, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_autoscaling_policy_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AutoscalingPolicyServiceGrpcTransport, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_autoscaling_policy_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.AutoscalingPolicyServiceGrpcTransport, grpc_helpers), + (transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_autoscaling_policy_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "dataproc.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="dataproc.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) +def test_autoscaling_policy_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_autoscaling_policy_service_host_no_port(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), + ) + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_autoscaling_policy_service_host_with_port(): + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), + ) + assert client.transport._host == 'dataproc.googleapis.com:8000' + +def test_autoscaling_policy_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_autoscaling_policy_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) +def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) +def test_autoscaling_policy_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_autoscaling_policy_path(): + project = "squid" + location = "clam" + autoscaling_policy = "whelk" + expected = "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format(project=project, location=location, autoscaling_policy=autoscaling_policy, ) + actual = AutoscalingPolicyServiceClient.autoscaling_policy_path(project, location, autoscaling_policy) + assert expected == actual + + +def test_parse_autoscaling_policy_path(): + expected = { + "project": "octopus", + "location": "oyster", + "autoscaling_policy": "nudibranch", + } + path = AutoscalingPolicyServiceClient.autoscaling_policy_path(**expected) + + # Check that the 
path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_autoscaling_policy_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = AutoscalingPolicyServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = AutoscalingPolicyServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = AutoscalingPolicyServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = AutoscalingPolicyServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = AutoscalingPolicyServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = AutoscalingPolicyServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = AutoscalingPolicyServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = AutoscalingPolicyServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = AutoscalingPolicyServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = AutoscalingPolicyServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = AutoscalingPolicyServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.AutoscalingPolicyServiceTransport, '_prep_wrapped_messages') as prep: + client = AutoscalingPolicyServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.AutoscalingPolicyServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = AutoscalingPolicyServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py new file mode 100644 index 00000000..d41f9004 --- /dev/null +++ b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py @@ -0,0 +1,2258 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1beta2.services.cluster_controller import ClusterControllerAsyncClient +from google.cloud.dataproc_v1beta2.services.cluster_controller import ClusterControllerClient +from google.cloud.dataproc_v1beta2.services.cluster_controller import pagers +from google.cloud.dataproc_v1beta2.services.cluster_controller import transports +from google.cloud.dataproc_v1beta2.services.cluster_controller.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.dataproc_v1beta2.types import clusters +from google.cloud.dataproc_v1beta2.types import operations +from google.cloud.dataproc_v1beta2.types import shared +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ClusterControllerClient._get_default_mtls_endpoint(None) is None + assert ClusterControllerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ClusterControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ClusterControllerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ClusterControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ClusterControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ClusterControllerClient, + ClusterControllerAsyncClient, +]) +def test_cluster_controller_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + ClusterControllerClient, + ClusterControllerAsyncClient, +]) +def test_cluster_controller_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ClusterControllerGrpcTransport, "grpc"), + (transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_controller_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + ClusterControllerClient, + ClusterControllerAsyncClient, +]) +def test_cluster_controller_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_cluster_controller_client_get_transport_class(): + transport = ClusterControllerClient.get_transport_class() + available_transports = [ + 
transports.ClusterControllerGrpcTransport, + ] + assert transport in available_transports + + transport = ClusterControllerClient.get_transport_class("grpc") + assert transport == transports.ClusterControllerGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(ClusterControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerClient)) +@mock.patch.object(ClusterControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerAsyncClient)) +def test_cluster_controller_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ClusterControllerClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ClusterControllerClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc", "true"), + (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc", "false"), + (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(ClusterControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerClient)) +@mock.patch.object(ClusterControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_cluster_controller_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_controller_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_controller_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_cluster_controller_client_client_options_from_dict(): + with mock.patch('google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = ClusterControllerClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_cluster(transport: str = 'grpc', request_type=clusters.CreateClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_cluster_from_dict(): + test_create_cluster(request_type=dict) + + +def test_create_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + client.create_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.CreateClusterRequest() + + +@pytest.mark.asyncio +async def test_create_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.CreateClusterRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_cluster_async_from_dict(): + await test_create_cluster_async(request_type=dict) + + +def test_create_cluster_flattened(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_cluster( + project_id='project_id_value', + region='region_value', + cluster=clusters.Cluster(project_id='project_id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster == clusters.Cluster(project_id='project_id_value') + + +def test_create_cluster_flattened_error(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_cluster( + clusters.CreateClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster=clusters.Cluster(project_id='project_id_value'), + ) + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_cluster( + project_id='project_id_value', + region='region_value', + cluster=clusters.Cluster(project_id='project_id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster == clusters.Cluster(project_id='project_id_value') + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_cluster( + clusters.CreateClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster=clusters.Cluster(project_id='project_id_value'), + ) + + +def test_update_cluster(transport: str = 'grpc', request_type=clusters.UpdateClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.UpdateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_cluster_from_dict(): + test_update_cluster(request_type=dict) + + +def test_update_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + client.update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.UpdateClusterRequest() + + +@pytest.mark.asyncio +async def test_update_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.UpdateClusterRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.UpdateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_cluster_async_from_dict(): + await test_update_cluster_async(request_type=dict) + + +def test_update_cluster_flattened(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + cluster=clusters.Cluster(project_id='project_id_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + assert args[0].cluster == clusters.Cluster(project_id='project_id_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +def test_update_cluster_flattened_error(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_cluster( + clusters.UpdateClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + cluster=clusters.Cluster(project_id='project_id_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + cluster=clusters.Cluster(project_id='project_id_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + assert args[0].cluster == clusters.Cluster(project_id='project_id_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_cluster( + clusters.UpdateClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + cluster=clusters.Cluster(project_id='project_id_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_delete_cluster(transport: str = 'grpc', request_type=clusters.DeleteClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DeleteClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_cluster_from_dict(): + test_delete_cluster(request_type=dict) + + +def test_delete_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + client.delete_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DeleteClusterRequest() + + +@pytest.mark.asyncio +async def test_delete_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.DeleteClusterRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DeleteClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_cluster_async_from_dict(): + await test_delete_cluster_async(request_type=dict) + + +def test_delete_cluster_flattened(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + + +def test_delete_cluster_flattened_error(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + clusters.DeleteClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_cluster( + clusters.DeleteClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + +def test_get_cluster(transport: str = 'grpc', request_type=clusters.GetClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = clusters.Cluster(
+ project_id='project_id_value',
+ cluster_name='cluster_name_value',
+ cluster_uuid='cluster_uuid_value',
+ )
+ response = client.get_cluster(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == clusters.GetClusterRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, clusters.Cluster)
+ assert response.project_id == 'project_id_value'
+ assert response.cluster_name == 'cluster_name_value'
+ assert response.cluster_uuid == 'cluster_uuid_value'
+
+
+def test_get_cluster_from_dict():
+ test_get_cluster(request_type=dict)
+
+
+def test_get_cluster_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = ClusterControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_cluster),
+ '__call__') as call:
+ client.get_cluster()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == clusters.GetClusterRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.GetClusterRequest):
+ client = ClusterControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_cluster),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.Cluster(
+ project_id='project_id_value',
+ cluster_name='cluster_name_value',
+ cluster_uuid='cluster_uuid_value',
+ ))
+ response = await client.get_cluster(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == clusters.GetClusterRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, clusters.Cluster)
+ assert response.project_id == 'project_id_value'
+ assert response.cluster_name == 'cluster_name_value'
+ assert response.cluster_uuid == 'cluster_uuid_value'
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_async_from_dict():
+ await test_get_cluster_async(request_type=dict)
+
+
+def test_get_cluster_flattened():
+ client = ClusterControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_cluster),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = clusters.Cluster()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_cluster(
+ project_id='project_id_value',
+ region='region_value',
+ cluster_name='cluster_name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].region == 'region_value'
+ assert args[0].cluster_name == 'cluster_name_value'
+
+
+def test_get_cluster_flattened_error():
+ client = ClusterControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_cluster(
+ clusters.GetClusterRequest(),
+ project_id='project_id_value',
+ region='region_value',
+ cluster_name='cluster_name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_flattened_async():
+ client = ClusterControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_cluster),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.Cluster())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_cluster(
+ project_id='project_id_value',
+ region='region_value',
+ cluster_name='cluster_name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].region == 'region_value'
+ assert args[0].cluster_name == 'cluster_name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_flattened_error_async():
+ client = ClusterControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_cluster(
+ clusters.GetClusterRequest(),
+ project_id='project_id_value',
+ region='region_value',
+ cluster_name='cluster_name_value',
+ )
+
+
+def test_list_clusters(transport: str = 'grpc', request_type=clusters.ListClustersRequest):
+ client = ClusterControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_clusters),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = clusters.ListClustersResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_clusters(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == clusters.ListClustersRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListClustersPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_clusters_from_dict():
+ test_list_clusters(request_type=dict)
+
+
+def test_list_clusters_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + client.list_clusters() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.ListClustersRequest() + + +@pytest.mark.asyncio +async def test_list_clusters_async(transport: str = 'grpc_asyncio', request_type=clusters.ListClustersRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(clusters.ListClustersResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.ListClustersRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_clusters_async_from_dict(): + await test_list_clusters_async(request_type=dict) + + +def test_list_clusters_flattened(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.ListClustersResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters( + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].filter == 'filter_value' + + +def test_list_clusters_flattened_error(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + clusters.ListClustersRequest(), + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = clusters.ListClustersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.ListClustersResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_clusters( + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].filter == 'filter_value' + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_clusters( + clusters.ListClustersRequest(), + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + +def test_list_clusters_pager(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + clusters.Cluster(), + ], + next_page_token='abc', + ), + clusters.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + ], + next_page_token='ghi', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + ], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_clusters(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, clusters.Cluster) + for i in results) + +def test_list_clusters_pages(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + clusters.Cluster(), + ], + next_page_token='abc', + ), + clusters.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + ], + next_page_token='ghi', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + ], + ), + RuntimeError, + ) + pages = list(client.list_clusters(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_clusters_async_pager(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + clusters.Cluster(), + ], + next_page_token='abc', + ), + clusters.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + ], + next_page_token='ghi', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_clusters(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, clusters.Cluster) + for i in responses) + +@pytest.mark.asyncio +async def test_list_clusters_async_pages(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + clusters.Cluster(), + ], + next_page_token='abc', + ), + clusters.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + ], + next_page_token='ghi', + ), + clusters.ListClustersResponse( + clusters=[ + clusters.Cluster(), + clusters.Cluster(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_clusters(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_diagnose_cluster(transport: str = 'grpc', request_type=clusters.DiagnoseClusterRequest): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.diagnose_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.diagnose_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DiagnoseClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_diagnose_cluster_from_dict(): + test_diagnose_cluster(request_type=dict) + + +def test_diagnose_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.diagnose_cluster), + '__call__') as call: + client.diagnose_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DiagnoseClusterRequest() + + +@pytest.mark.asyncio +async def test_diagnose_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.DiagnoseClusterRequest): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.diagnose_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.diagnose_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == clusters.DiagnoseClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_diagnose_cluster_async_from_dict(): + await test_diagnose_cluster_async(request_type=dict) + + +def test_diagnose_cluster_flattened(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.diagnose_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.diagnose_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + + +def test_diagnose_cluster_flattened_error(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.diagnose_cluster( + clusters.DiagnoseClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + +@pytest.mark.asyncio +async def test_diagnose_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.diagnose_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.diagnose_cluster( + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].cluster_name == 'cluster_name_value' + + +@pytest.mark.asyncio +async def test_diagnose_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.diagnose_cluster( + clusters.DiagnoseClusterRequest(), + project_id='project_id_value', + region='region_value', + cluster_name='cluster_name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ClusterControllerClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ClusterControllerGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.ClusterControllerGrpcTransport, + transports.ClusterControllerGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ClusterControllerGrpcTransport, + ) + +def test_cluster_controller_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ClusterControllerTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_cluster_controller_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ClusterControllerTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'create_cluster', + 'update_cluster', + 'delete_cluster', + 'get_cluster', + 'list_clusters', + 'diagnose_cluster', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_cluster_controller_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterControllerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_cluster_controller_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterControllerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_cluster_controller_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterControllerTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_cluster_controller_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ClusterControllerClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_cluster_controller_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ClusterControllerClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterControllerGrpcTransport, + transports.ClusterControllerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_cluster_controller_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterControllerGrpcTransport, + transports.ClusterControllerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_cluster_controller_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ClusterControllerGrpcTransport, grpc_helpers), + (transports.ClusterControllerGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_cluster_controller_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "dataproc.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="dataproc.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport]) +def test_cluster_controller_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_cluster_controller_host_no_port(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), + ) + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_cluster_controller_host_with_port(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), + ) + assert client.transport._host == 'dataproc.googleapis.com:8000' + +def test_cluster_controller_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ClusterControllerGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_cluster_controller_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.ClusterControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport]) +def test_cluster_controller_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport]) +def test_cluster_controller_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_cluster_controller_grpc_lro_client(): + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_cluster_controller_grpc_lro_async_client(): + client = ClusterControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_cluster_path(): + project = "squid" + location = "clam" + cluster = "whelk" + expected = "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) + actual = ClusterControllerClient.cluster_path(project, location, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "octopus", + "location": "oyster", + "cluster": "nudibranch", + } + path = ClusterControllerClient.cluster_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_cluster_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ClusterControllerClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = ClusterControllerClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = ClusterControllerClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = ClusterControllerClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ClusterControllerClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = ClusterControllerClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = ClusterControllerClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = ClusterControllerClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ClusterControllerClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ClusterControllerClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = ClusterControllerClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ClusterControllerTransport, '_prep_wrapped_messages') as prep: + client = ClusterControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ClusterControllerTransport, '_prep_wrapped_messages') as prep: + transport_class = ClusterControllerClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py new file mode 100644 index 00000000..246d7dc4 --- /dev/null +++ b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py @@ -0,0 +1,2371 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerAsyncClient +from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerClient +from google.cloud.dataproc_v1beta2.services.job_controller import pagers +from google.cloud.dataproc_v1beta2.services.job_controller import transports +from google.cloud.dataproc_v1beta2.services.job_controller.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.dataproc_v1beta2.types import jobs +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert JobControllerClient._get_default_mtls_endpoint(None) is None + assert JobControllerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert JobControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert JobControllerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert JobControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert JobControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + JobControllerClient, + JobControllerAsyncClient, +]) +def test_job_controller_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + JobControllerClient, + JobControllerAsyncClient, +]) +def test_job_controller_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.JobControllerGrpcTransport, "grpc"), + (transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_controller_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + JobControllerClient, + JobControllerAsyncClient, +]) +def test_job_controller_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_job_controller_client_get_transport_class(): + transport = JobControllerClient.get_transport_class() + available_transports = [ + transports.JobControllerGrpcTransport, + ] + assert transport in available_transports + + transport = 
JobControllerClient.get_transport_class("grpc") + assert transport == transports.JobControllerGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(JobControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerClient)) +@mock.patch.object(JobControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerAsyncClient)) +def test_job_controller_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(JobControllerClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(JobControllerClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc", "true"), + (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc", "false"), + (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(JobControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerClient)) +@mock.patch.object(JobControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_job_controller_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_controller_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_controller_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_job_controller_client_client_options_from_dict(): + with mock.patch('google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = JobControllerClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_submit_job(transport: str = 'grpc', request_type=jobs.SubmitJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + submitted_by='submitted_by_value', + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), + ) + response = client.submit_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.submitted_by == 'submitted_by_value' + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +def test_submit_job_from_dict(): + test_submit_job(request_type=dict) + + +def test_submit_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.submit_job), + '__call__') as call: + client.submit_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + +@pytest.mark.asyncio +async def test_submit_job_async(transport: str = 'grpc_asyncio', request_type=jobs.SubmitJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( + submitted_by='submitted_by_value', + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + )) + response = await client.submit_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.submitted_by == 'submitted_by_value' + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +@pytest.mark.asyncio +async def test_submit_job_async_from_dict(): + await test_submit_job_async(request_type=dict) + + +def test_submit_job_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.submit_job( + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) + + +def test_submit_job_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.submit_job( + jobs.SubmitJobRequest(), + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + +@pytest.mark.asyncio +async def test_submit_job_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.submit_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.submit_job( + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) + + +@pytest.mark.asyncio +async def test_submit_job_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.submit_job( + jobs.SubmitJobRequest(), + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + +def test_submit_job_as_operation(transport: str = 'grpc', request_type=jobs.SubmitJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.submit_job_as_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_submit_job_as_operation_from_dict(): + test_submit_job_as_operation(request_type=dict) + + +def test_submit_job_as_operation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), + '__call__') as call: + client.submit_job_as_operation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_async(transport: str = 'grpc_asyncio', request_type=jobs.SubmitJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.submit_job_as_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_async_from_dict(): + await test_submit_job_as_operation_async(request_type=dict) + + +def test_submit_job_as_operation_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.submit_job_as_operation( + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) + + +def test_submit_job_as_operation_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.submit_job_as_operation( + jobs.SubmitJobRequest(), + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.submit_job_as_operation( + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.submit_job_as_operation( + jobs.SubmitJobRequest(), + project_id='project_id_value', + region='region_value', + job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), + ) + + +def test_get_job(transport: str = 'grpc', request_type=jobs.GetJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + submitted_by='submitted_by_value', + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), + ) + response = client.get_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.GetJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.submitted_by == 'submitted_by_value' + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +def test_get_job_from_dict(): + test_get_job(request_type=dict) + + +def test_get_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_job), + '__call__') as call: + client.get_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.GetJobRequest() + + +@pytest.mark.asyncio +async def test_get_job_async(transport: str = 'grpc_asyncio', request_type=jobs.GetJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( + submitted_by='submitted_by_value', + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + )) + response = await client.get_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.GetJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.submitted_by == 'submitted_by_value' + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +@pytest.mark.asyncio +async def test_get_job_async_from_dict(): + await test_get_job_async(request_type=dict) + + +def test_get_job_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +def test_get_job_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_job( + jobs.GetJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +@pytest.mark.asyncio +async def test_get_job_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +@pytest.mark.asyncio +async def test_get_job_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_job( + jobs.GetJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +def test_list_jobs(transport: str = 'grpc', request_type=jobs.ListJobsRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.ListJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.ListJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_jobs_from_dict(): + test_list_jobs(request_type=dict) + + +def test_list_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + client.list_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.ListJobsRequest() + + +@pytest.mark.asyncio +async def test_list_jobs_async(transport: str = 'grpc_asyncio', request_type=jobs.ListJobsRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.ListJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.ListJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_jobs_async_from_dict(): + await test_list_jobs_async(request_type=dict) + + +def test_list_jobs_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = jobs.ListJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_jobs( + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].filter == 'filter_value' + + +def test_list_jobs_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_jobs( + jobs.ListJobsRequest(), + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + +@pytest.mark.asyncio +async def test_list_jobs_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.ListJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.ListJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_jobs( + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].filter == 'filter_value' + + +@pytest.mark.asyncio +async def test_list_jobs_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_jobs( + jobs.ListJobsRequest(), + project_id='project_id_value', + region='region_value', + filter='filter_value', + ) + + +def test_list_jobs_pager(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + jobs.Job(), + ], + next_page_token='abc', + ), + jobs.ListJobsResponse( + jobs=[], + next_page_token='def', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + ], + next_page_token='ghi', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + ], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_jobs(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, jobs.Job) + for i in results) + +def test_list_jobs_pages(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + jobs.Job(), + ], + next_page_token='abc', + ), + jobs.ListJobsResponse( + jobs=[], + next_page_token='def', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + ], + next_page_token='ghi', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + ], + ), + RuntimeError, + ) + pages = list(client.list_jobs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_jobs_async_pager(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + jobs.Job(), + ], + next_page_token='abc', + ), + jobs.ListJobsResponse( + jobs=[], + next_page_token='def', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + ], + next_page_token='ghi', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_jobs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, jobs.Job) + for i in responses) + +@pytest.mark.asyncio +async def test_list_jobs_async_pages(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + jobs.Job(), + ], + next_page_token='abc', + ), + jobs.ListJobsResponse( + jobs=[], + next_page_token='def', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + ], + next_page_token='ghi', + ), + jobs.ListJobsResponse( + jobs=[ + jobs.Job(), + jobs.Job(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_jobs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_update_job(transport: str = 'grpc', request_type=jobs.UpdateJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = jobs.Job( + submitted_by='submitted_by_value', + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), + ) + response = client.update_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.UpdateJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.submitted_by == 'submitted_by_value' + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +def test_update_job_from_dict(): + test_update_job(request_type=dict) + + +def test_update_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_job), + '__call__') as call: + client.update_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.UpdateJobRequest() + + +@pytest.mark.asyncio +async def test_update_job_async(transport: str = 'grpc_asyncio', request_type=jobs.UpdateJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( + submitted_by='submitted_by_value', + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + )) + response = await client.update_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.UpdateJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, jobs.Job) + assert response.submitted_by == 'submitted_by_value' + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +@pytest.mark.asyncio +async def test_update_job_async_from_dict(): + await test_update_job_async(request_type=dict) + + +def test_cancel_job(transport: str = 'grpc', request_type=jobs.CancelJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + submitted_by='submitted_by_value', + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), + ) + response = client.cancel_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.CancelJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.submitted_by == 'submitted_by_value' + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +def test_cancel_job_from_dict(): + test_cancel_job(request_type=dict) + + +def test_cancel_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_job), + '__call__') as call: + client.cancel_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.CancelJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_job_async(transport: str = 'grpc_asyncio', request_type=jobs.CancelJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( + submitted_by='submitted_by_value', + driver_output_resource_uri='driver_output_resource_uri_value', + driver_control_files_uri='driver_control_files_uri_value', + job_uuid='job_uuid_value', + done=True, + )) + response = await client.cancel_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.CancelJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + assert response.submitted_by == 'submitted_by_value' + assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' + assert response.driver_control_files_uri == 'driver_control_files_uri_value' + assert response.job_uuid == 'job_uuid_value' + assert response.done is True + + +@pytest.mark.asyncio +async def test_cancel_job_async_from_dict(): + await test_cancel_job_async(request_type=dict) + + +def test_cancel_job_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +def test_cancel_job_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_job( + jobs.CancelJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +@pytest.mark.asyncio +async def test_cancel_job_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +@pytest.mark.asyncio +async def test_cancel_job_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_job( + jobs.CancelJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +def test_delete_job(transport: str = 'grpc', request_type=jobs.DeleteJobRequest): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.DeleteJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_job_from_dict(): + test_delete_job(request_type=dict) + + +def test_delete_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_job), + '__call__') as call: + client.delete_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.DeleteJobRequest() + + +@pytest.mark.asyncio +async def test_delete_job_async(transport: str = 'grpc_asyncio', request_type=jobs.DeleteJobRequest): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == jobs.DeleteJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_job_async_from_dict(): + await test_delete_job_async(request_type=dict) + + +def test_delete_job_flattened(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +def test_delete_job_flattened_error(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_job( + jobs.DeleteJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +@pytest.mark.asyncio +async def test_delete_job_flattened_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_job( + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].region == 'region_value' + assert args[0].job_id == 'job_id_value' + + +@pytest.mark.asyncio +async def test_delete_job_flattened_error_async(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_job( + jobs.DeleteJobRequest(), + project_id='project_id_value', + region='region_value', + job_id='job_id_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.JobControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = JobControllerClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.JobControllerGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.JobControllerGrpcTransport, + transports.JobControllerGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.JobControllerGrpcTransport, + ) + +def test_job_controller_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.JobControllerTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_job_controller_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.JobControllerTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'submit_job', + 'submit_job_as_operation', + 'get_job', + 'list_jobs', + 'update_job', + 'cancel_job', + 'delete_job', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_job_controller_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobControllerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_job_controller_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobControllerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_job_controller_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobControllerTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_job_controller_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + JobControllerClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_job_controller_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + JobControllerClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.JobControllerGrpcTransport, + transports.JobControllerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_job_controller_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.JobControllerGrpcTransport, + transports.JobControllerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_job_controller_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.JobControllerGrpcTransport, grpc_helpers), + (transports.JobControllerGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_job_controller_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "dataproc.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="dataproc.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport]) +def test_job_controller_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_job_controller_host_no_port(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), + ) + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_job_controller_host_with_port(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), + ) + assert client.transport._host == 'dataproc.googleapis.com:8000' + +def test_job_controller_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.JobControllerGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_job_controller_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.JobControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport]) +def test_job_controller_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport]) +def test_job_controller_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_job_controller_grpc_lro_client(): + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_job_controller_grpc_lro_async_client(): + client = JobControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = JobControllerClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = JobControllerClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = JobControllerClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = JobControllerClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = JobControllerClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = JobControllerClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = JobControllerClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = JobControllerClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = JobControllerClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = JobControllerClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = JobControllerClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = JobControllerClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = JobControllerClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = JobControllerClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobControllerClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.JobControllerTransport, '_prep_wrapped_messages') as prep: + client = JobControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.JobControllerTransport, '_prep_wrapped_messages') as prep: + transport_class = JobControllerClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py new file mode 100644 index 00000000..12783033 --- /dev/null +++ b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py @@ -0,0 +1,2842 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1beta2.services.workflow_template_service import WorkflowTemplateServiceAsyncClient +from google.cloud.dataproc_v1beta2.services.workflow_template_service import WorkflowTemplateServiceClient +from google.cloud.dataproc_v1beta2.services.workflow_template_service import pagers +from google.cloud.dataproc_v1beta2.services.workflow_template_service import transports +from google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.dataproc_v1beta2.types import clusters +from google.cloud.dataproc_v1beta2.types import jobs +from google.cloud.dataproc_v1beta2.types import shared +from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# 
- Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(None) is None + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + WorkflowTemplateServiceClient, + WorkflowTemplateServiceAsyncClient, +]) +def test_workflow_template_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + WorkflowTemplateServiceClient, + WorkflowTemplateServiceAsyncClient, +]) +def test_workflow_template_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.WorkflowTemplateServiceGrpcTransport, "grpc"), + (transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_workflow_template_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + 
WorkflowTemplateServiceClient, + WorkflowTemplateServiceAsyncClient, +]) +def test_workflow_template_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_workflow_template_service_client_get_transport_class(): + transport = WorkflowTemplateServiceClient.get_transport_class() + available_transports = [ + transports.WorkflowTemplateServiceGrpcTransport, + ] + assert transport in available_transports + + transport = WorkflowTemplateServiceClient.get_transport_class("grpc") + assert transport == transports.WorkflowTemplateServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), + (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(WorkflowTemplateServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceClient)) +@mock.patch.object(WorkflowTemplateServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceAsyncClient)) +def test_workflow_template_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(WorkflowTemplateServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(WorkflowTemplateServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc", "true"), + (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc", "false"), + (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(WorkflowTemplateServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceClient)) +@mock.patch.object(WorkflowTemplateServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_workflow_template_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), + (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_workflow_template_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), + (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_workflow_template_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_workflow_template_service_client_client_options_from_dict(): + with mock.patch('google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = WorkflowTemplateServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_workflow_template(transport: str = 'grpc', request_type=workflow_templates.CreateWorkflowTemplateRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + ) + response = client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +def test_create_workflow_template_from_dict(): + test_create_workflow_template(request_type=dict) + + +def test_create_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + client.create_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_create_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.CreateWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + )) + response = await client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +@pytest.mark.asyncio +async def test_create_workflow_template_async_from_dict(): + await test_create_workflow_template_async(request_type=dict) + + +def test_create_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.CreateWorkflowTemplateRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + call.return_value = workflow_templates.WorkflowTemplate() + client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.CreateWorkflowTemplateRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + await client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_workflow_template( + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +def test_create_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_workflow_template( + workflow_templates.CreateWorkflowTemplateRequest(), + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +@pytest.mark.asyncio +async def test_create_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_workflow_template( + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +@pytest.mark.asyncio +async def test_create_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_workflow_template( + workflow_templates.CreateWorkflowTemplateRequest(), + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +def test_get_workflow_template(transport: str = 'grpc', request_type=workflow_templates.GetWorkflowTemplateRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + ) + response = client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.GetWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +def test_get_workflow_template_from_dict(): + test_get_workflow_template(request_type=dict) + + +def test_get_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + client.get_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.GetWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_get_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.GetWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + )) + response = await client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.GetWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +@pytest.mark.asyncio +async def test_get_workflow_template_async_from_dict(): + await test_get_workflow_template_async(request_type=dict) + + +def test_get_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.GetWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + call.return_value = workflow_templates.WorkflowTemplate() + client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.GetWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + await client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_workflow_template( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_workflow_template( + workflow_templates.GetWorkflowTemplateRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_get_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_workflow_template( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_workflow_template( + workflow_templates.GetWorkflowTemplateRequest(), + name='name_value', + ) + + +def test_instantiate_workflow_template(transport: str = 'grpc', request_type=workflow_templates.InstantiateWorkflowTemplateRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_workflow_template_from_dict(): + test_instantiate_workflow_template(request_type=dict) + + +def test_instantiate_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + client.instantiate_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.InstantiateWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_async_from_dict(): + await test_instantiate_workflow_template_async(request_type=dict) + + +def test_instantiate_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_instantiate_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.instantiate_workflow_template( + name='name_value', + parameters={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + assert args[0].parameters == {'key_value': 'value_value'} + + +def test_instantiate_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.instantiate_workflow_template( + workflow_templates.InstantiateWorkflowTemplateRequest(), + name='name_value', + parameters={'key_value': 'value_value'}, + ) + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.instantiate_workflow_template( + name='name_value', + parameters={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + assert args[0].parameters == {'key_value': 'value_value'} + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.instantiate_workflow_template( + workflow_templates.InstantiateWorkflowTemplateRequest(), + name='name_value', + parameters={'key_value': 'value_value'}, + ) + + +def test_instantiate_inline_workflow_template(transport: str = 'grpc', request_type=workflow_templates.InstantiateInlineWorkflowTemplateRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_inline_workflow_template_from_dict(): + test_instantiate_inline_workflow_template(request_type=dict) + + +def test_instantiate_inline_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + client.instantiate_inline_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.InstantiateInlineWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_async_from_dict(): + await test_instantiate_inline_workflow_template_async(request_type=dict) + + +def test_instantiate_inline_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_instantiate_inline_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.instantiate_inline_workflow_template( + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +def test_instantiate_inline_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.instantiate_inline_workflow_template( + workflow_templates.InstantiateInlineWorkflowTemplateRequest(), + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.instantiate_inline_workflow_template( + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.instantiate_inline_workflow_template( + workflow_templates.InstantiateInlineWorkflowTemplateRequest(), + parent='parent_value', + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +def test_update_workflow_template(transport: str = 'grpc', request_type=workflow_templates.UpdateWorkflowTemplateRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + ) + response = client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +def test_update_workflow_template_from_dict(): + test_update_workflow_template(request_type=dict) + + +def test_update_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + client.update_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_update_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.UpdateWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( + id='id_value', + name='name_value', + version=774, + )) + response = await client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + assert response.id == 'id_value' + assert response.name == 'name_value' + assert response.version == 774 + + +@pytest.mark.asyncio +async def test_update_workflow_template_async_from_dict(): + await test_update_workflow_template_async(request_type=dict) + + +def test_update_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.UpdateWorkflowTemplateRequest() + + request.template.name = 'template.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + call.return_value = workflow_templates.WorkflowTemplate() + client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'template.name=template.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.UpdateWorkflowTemplateRequest() + + request.template.name = 'template.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + await client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'template.name=template.name/value', + ) in kw['metadata'] + + +def test_update_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_workflow_template( + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +def test_update_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_workflow_template( + workflow_templates.UpdateWorkflowTemplateRequest(), + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +@pytest.mark.asyncio +async def test_update_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_workflow_template( + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') + + +@pytest.mark.asyncio +async def test_update_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_workflow_template( + workflow_templates.UpdateWorkflowTemplateRequest(), + template=workflow_templates.WorkflowTemplate(id='id_value'), + ) + + +def test_list_workflow_templates(transport: str = 'grpc', request_type=workflow_templates.ListWorkflowTemplatesRequest): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.ListWorkflowTemplatesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListWorkflowTemplatesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_workflow_templates_from_dict(): + test_list_workflow_templates(request_type=dict) + + +def test_list_workflow_templates_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + client.list_workflow_templates() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() + + +@pytest.mark.asyncio +async def test_list_workflow_templates_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.ListWorkflowTemplatesRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListWorkflowTemplatesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_workflow_templates_async_from_dict(): + await test_list_workflow_templates_async(request_type=dict) + + +def test_list_workflow_templates_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.ListWorkflowTemplatesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + call.return_value = workflow_templates.ListWorkflowTemplatesResponse() + client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_workflow_templates_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.ListWorkflowTemplatesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse()) + await client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_workflow_templates_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.ListWorkflowTemplatesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_workflow_templates( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0].parent == 'parent_value'
+
+
+def test_list_workflow_templates_flattened_error():
+    client = WorkflowTemplateServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_workflow_templates(
+            workflow_templates.ListWorkflowTemplatesRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_workflow_templates_flattened_async():
+    client = WorkflowTemplateServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_workflow_templates),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = workflow_templates.ListWorkflowTemplatesResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_workflow_templates(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_workflow_templates_flattened_error_async():
+    client = WorkflowTemplateServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_workflow_templates(
+            workflow_templates.ListWorkflowTemplatesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_workflow_templates_pager():
+    client = WorkflowTemplateServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_workflow_templates),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                ],
+                next_page_token='abc',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[],
+                next_page_token='def',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                ],
+                next_page_token='ghi',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_workflow_templates(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, workflow_templates.WorkflowTemplate)
+                   for i in results)
+
+def test_list_workflow_templates_pages():
+    client = WorkflowTemplateServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_workflow_templates),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                ],
+                next_page_token='abc',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[],
+                next_page_token='def',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                ],
+                next_page_token='ghi',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_workflow_templates(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_workflow_templates_async_pager():
+    client = WorkflowTemplateServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_workflow_templates),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                ],
+                next_page_token='abc',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[],
+                next_page_token='def',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                ],
+                next_page_token='ghi',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_workflow_templates(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, workflow_templates.WorkflowTemplate)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_workflow_templates_async_pages():
+    client = WorkflowTemplateServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_workflow_templates),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                ],
+                next_page_token='abc',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[],
+                next_page_token='def',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                ],
+                next_page_token='ghi',
+            ),
+            workflow_templates.ListWorkflowTemplatesResponse(
+                templates=[
+                    workflow_templates.WorkflowTemplate(),
+                    workflow_templates.WorkflowTemplate(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_workflow_templates(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_delete_workflow_template(transport: str = 'grpc', request_type=workflow_templates.DeleteWorkflowTemplateRequest):
+    client = WorkflowTemplateServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_workflow_template),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.delete_workflow_template(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_delete_workflow_template_from_dict():
+    test_delete_workflow_template(request_type=dict)
+
+
+def test_delete_workflow_template_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e.
request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + client.delete_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() + + +@pytest.mark.asyncio +async def test_delete_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.DeleteWorkflowTemplateRequest): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_workflow_template_async_from_dict(): + await test_delete_workflow_template_async(request_type=dict) + + +def test_delete_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.DeleteWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + call.return_value = None + client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.DeleteWorkflowTemplateRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_workflow_template( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_workflow_template( + workflow_templates.DeleteWorkflowTemplateRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_workflow_template( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_delete_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_workflow_template( + workflow_templates.DeleteWorkflowTemplateRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = WorkflowTemplateServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.WorkflowTemplateServiceGrpcTransport, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.WorkflowTemplateServiceGrpcTransport, + ) + +def test_workflow_template_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.WorkflowTemplateServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_workflow_template_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.WorkflowTemplateServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_workflow_template', + 'get_workflow_template', + 'instantiate_workflow_template', + 'instantiate_inline_workflow_template', + 'update_workflow_template', + 'list_workflow_templates', + 'delete_workflow_template', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_workflow_template_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.WorkflowTemplateServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_workflow_template_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.WorkflowTemplateServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_workflow_template_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.WorkflowTemplateServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_workflow_template_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + WorkflowTemplateServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_workflow_template_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + WorkflowTemplateServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.WorkflowTemplateServiceGrpcTransport, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_workflow_template_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.WorkflowTemplateServiceGrpcTransport, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_workflow_template_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.WorkflowTemplateServiceGrpcTransport, grpc_helpers), + (transports.WorkflowTemplateServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_workflow_template_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "dataproc.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="dataproc.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) +def test_workflow_template_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_workflow_template_service_host_no_port(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), + ) + assert client.transport._host == 'dataproc.googleapis.com:443' + + +def test_workflow_template_service_host_with_port(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), + ) + assert client.transport._host == 'dataproc.googleapis.com:8000' + +def test_workflow_template_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.WorkflowTemplateServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_workflow_template_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) +def test_workflow_template_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) +def test_workflow_template_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_workflow_template_service_grpc_lro_client(): + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_workflow_template_service_grpc_lro_async_client(): + client = WorkflowTemplateServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_cluster_path(): + project = "squid" + location = "clam" + cluster = "whelk" + expected = "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) + actual = WorkflowTemplateServiceClient.cluster_path(project, location, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "octopus", + "location": "oyster", + "cluster": "nudibranch", + } + path = WorkflowTemplateServiceClient.cluster_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_cluster_path(path) + assert expected == actual + +def test_workflow_template_path(): + project = "cuttlefish" + region = "mussel" + workflow_template = "winkle" + expected = "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format(project=project, region=region, workflow_template=workflow_template, ) + actual = WorkflowTemplateServiceClient.workflow_template_path(project, region, workflow_template) + assert expected == actual + + +def test_parse_workflow_template_path(): + expected = { + "project": "nautilus", + "region": "scallop", + "workflow_template": "abalone", + } + path = WorkflowTemplateServiceClient.workflow_template_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_workflow_template_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = WorkflowTemplateServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = WorkflowTemplateServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = WorkflowTemplateServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = WorkflowTemplateServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = WorkflowTemplateServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = WorkflowTemplateServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = WorkflowTemplateServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = WorkflowTemplateServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = WorkflowTemplateServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = WorkflowTemplateServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = WorkflowTemplateServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.WorkflowTemplateServiceTransport, '_prep_wrapped_messages') as prep: + client = WorkflowTemplateServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.WorkflowTemplateServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = WorkflowTemplateServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) From 2e4b6f67ae05191b4b0e58ac74d67f4d3483db83 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 29 Jun 2021 22:26:06 +0000 Subject: [PATCH 2/2] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/master/packages/owl-bot/README.md --- .../transports/base.py | 2 +- .../transports/grpc.py | 5 +- .../transports/grpc_asyncio.py | 5 +- .../cluster_controller/transports/base.py | 2 +- .../cluster_controller/transports/grpc.py | 5 +- .../transports/grpc_asyncio.py | 5 +- .../job_controller/transports/base.py | 2 +- .../job_controller/transports/grpc.py | 5 +- .../job_controller/transports/grpc_asyncio.py | 5 +- .../transports/base.py | 2 +- .../transports/grpc.py | 5 +- .../transports/grpc_asyncio.py | 5 +- .../transports/base.py | 2 +- .../transports/grpc.py | 5 +- .../transports/grpc_asyncio.py | 5 +- 
.../cluster_controller/transports/base.py | 2 +- .../cluster_controller/transports/grpc.py | 5 +- .../transports/grpc_asyncio.py | 5 +- .../job_controller/transports/base.py | 2 +- .../job_controller/transports/grpc.py | 5 +- .../job_controller/transports/grpc_asyncio.py | 5 +- .../transports/base.py | 2 +- .../transports/grpc.py | 5 +- .../transports/grpc_asyncio.py | 5 +- owl-bot-staging/v1/.coveragerc | 17 - owl-bot-staging/v1/MANIFEST.in | 2 - owl-bot-staging/v1/README.rst | 49 - owl-bot-staging/v1/docs/conf.py | 376 --- .../autoscaling_policy_service.rst | 10 - .../docs/dataproc_v1/cluster_controller.rst | 10 - .../v1/docs/dataproc_v1/job_controller.rst | 10 - .../v1/docs/dataproc_v1/services.rst | 9 - owl-bot-staging/v1/docs/dataproc_v1/types.rst | 7 - .../dataproc_v1/workflow_template_service.rst | 10 - owl-bot-staging/v1/docs/index.rst | 7 - .../v1/google/cloud/dataproc/__init__.py | 217 -- .../v1/google/cloud/dataproc/py.typed | 2 - .../v1/google/cloud/dataproc_v1/__init__.py | 218 -- .../cloud/dataproc_v1/gapic_metadata.json | 335 -- .../v1/google/cloud/dataproc_v1/py.typed | 2 - .../cloud/dataproc_v1/services/__init__.py | 15 - .../autoscaling_policy_service/__init__.py | 22 - .../async_client.py | 624 ---- .../autoscaling_policy_service/client.py | 790 ----- .../autoscaling_policy_service/pagers.py | 140 - .../transports/__init__.py | 33 - .../transports/base.py | 246 -- .../transports/grpc.py | 363 --- .../transports/grpc_asyncio.py | 367 --- .../services/cluster_controller/__init__.py | 22 - .../cluster_controller/async_client.py | 1020 ------ .../services/cluster_controller/client.py | 1178 ------- .../services/cluster_controller/pagers.py | 140 - .../cluster_controller/transports/__init__.py | 33 - .../cluster_controller/transports/base.py | 313 -- .../cluster_controller/transports/grpc.py | 472 --- .../transports/grpc_asyncio.py | 476 --- .../services/job_controller/__init__.py | 22 - .../services/job_controller/async_client.py | 796 ----- .../services/job_controller/client.py | 927 ------ .../services/job_controller/pagers.py | 140 - .../job_controller/transports/__init__.py | 33 - .../job_controller/transports/base.py | 308 -- .../job_controller/transports/grpc.py | 434 --- .../job_controller/transports/grpc_asyncio.py | 438 --- .../workflow_template_service/__init__.py | 22 - .../workflow_template_service/async_client.py | 945 ------ .../workflow_template_service/client.py | 1103 ------- .../workflow_template_service/pagers.py | 140 - .../transports/__init__.py | 33 - .../transports/base.py | 306 -- .../transports/grpc.py | 481 --- .../transports/grpc_asyncio.py | 485 --- .../cloud/dataproc_v1/types/__init__.py | 209 -- .../dataproc_v1/types/autoscaling_policies.py | 416 --- .../cloud/dataproc_v1/types/clusters.py | 1797 ----------- .../v1/google/cloud/dataproc_v1/types/jobs.py | 1368 -------- .../cloud/dataproc_v1/types/operations.py | 133 - .../google/cloud/dataproc_v1/types/shared.py | 46 - .../dataproc_v1/types/workflow_templates.py | 1050 ------ owl-bot-staging/v1/mypy.ini | 3 - owl-bot-staging/v1/noxfile.py | 132 - .../v1/scripts/fixup_dataproc_v1_keywords.py | 202 -- owl-bot-staging/v1/setup.py | 53 - owl-bot-staging/v1/tests/__init__.py | 16 - owl-bot-staging/v1/tests/unit/__init__.py | 16 - .../v1/tests/unit/gapic/__init__.py | 16 - .../tests/unit/gapic/dataproc_v1/__init__.py | 16 - .../test_autoscaling_policy_service.py | 2293 ------------- .../dataproc_v1/test_cluster_controller.py | 2449 -------------- .../gapic/dataproc_v1/test_job_controller.py | 2355 
-------------- .../test_workflow_template_service.py | 2863 ----------------- owl-bot-staging/v1beta2/.coveragerc | 17 - owl-bot-staging/v1beta2/MANIFEST.in | 2 - owl-bot-staging/v1beta2/README.rst | 49 - owl-bot-staging/v1beta2/docs/conf.py | 376 --- .../autoscaling_policy_service.rst | 10 - .../dataproc_v1beta2/cluster_controller.rst | 10 - .../docs/dataproc_v1beta2/job_controller.rst | 10 - .../docs/dataproc_v1beta2/services.rst | 9 - .../v1beta2/docs/dataproc_v1beta2/types.rst | 7 - .../workflow_template_service.rst | 10 - owl-bot-staging/v1beta2/docs/index.rst | 7 - .../v1beta2/google/cloud/dataproc/__init__.py | 205 -- .../v1beta2/google/cloud/dataproc/py.typed | 2 - .../google/cloud/dataproc_v1beta2/__init__.py | 206 -- .../dataproc_v1beta2/gapic_metadata.json | 315 -- .../google/cloud/dataproc_v1beta2/py.typed | 2 - .../dataproc_v1beta2/services/__init__.py | 15 - .../autoscaling_policy_service/__init__.py | 22 - .../async_client.py | 623 ---- .../autoscaling_policy_service/client.py | 789 ----- .../autoscaling_policy_service/pagers.py | 140 - .../transports/__init__.py | 33 - .../transports/base.py | 246 -- .../transports/grpc.py | 363 --- .../transports/grpc_asyncio.py | 367 --- .../services/cluster_controller/__init__.py | 22 - .../cluster_controller/async_client.py | 923 ------ .../services/cluster_controller/client.py | 1070 ------ .../services/cluster_controller/pagers.py | 140 - .../cluster_controller/transports/__init__.py | 33 - .../cluster_controller/transports/base.py | 285 -- .../cluster_controller/transports/grpc.py | 419 --- .../transports/grpc_asyncio.py | 423 --- .../services/job_controller/__init__.py | 22 - .../services/job_controller/async_client.py | 796 ----- .../services/job_controller/client.py | 927 ------ .../services/job_controller/pagers.py | 140 - .../job_controller/transports/__init__.py | 33 - .../job_controller/transports/base.py | 308 -- .../job_controller/transports/grpc.py | 434 --- .../job_controller/transports/grpc_asyncio.py | 438 --- .../workflow_template_service/__init__.py | 22 - .../workflow_template_service/async_client.py | 943 ------ .../workflow_template_service/client.py | 1092 ------- .../workflow_template_service/pagers.py | 140 - .../transports/__init__.py | 33 - .../transports/base.py | 306 -- .../transports/grpc.py | 481 --- .../transports/grpc_asyncio.py | 485 --- .../cloud/dataproc_v1beta2/types/__init__.py | 197 -- .../types/autoscaling_policies.py | 416 --- .../cloud/dataproc_v1beta2/types/clusters.py | 1545 --------- .../cloud/dataproc_v1beta2/types/jobs.py | 1364 -------- .../dataproc_v1beta2/types/operations.py | 133 - .../cloud/dataproc_v1beta2/types/shared.py | 43 - .../types/workflow_templates.py | 1073 ------ owl-bot-staging/v1beta2/mypy.ini | 3 - owl-bot-staging/v1beta2/noxfile.py | 132 - .../fixup_dataproc_v1beta2_keywords.py | 200 -- owl-bot-staging/v1beta2/setup.py | 53 - owl-bot-staging/v1beta2/tests/__init__.py | 16 - .../v1beta2/tests/unit/__init__.py | 16 - .../v1beta2/tests/unit/gapic/__init__.py | 16 - .../unit/gapic/dataproc_v1beta2/__init__.py | 16 - .../test_autoscaling_policy_service.py | 2293 ------------- .../test_cluster_controller.py | 2258 ------------- .../dataproc_v1beta2/test_job_controller.py | 2371 -------------- .../test_workflow_template_service.py | 2842 ---------------- .../test_autoscaling_policy_service.py | 26 +- .../dataproc_v1/test_cluster_controller.py | 26 +- .../gapic/dataproc_v1/test_job_controller.py | 26 +- .../test_workflow_template_service.py | 26 +- 
.../test_autoscaling_policy_service.py | 26 +- .../test_cluster_controller.py | 26 +- .../dataproc_v1beta2/test_job_controller.py | 26 +- .../test_workflow_template_service.py | 26 +- 168 files changed, 248 insertions(+), 58374 deletions(-) delete mode 100644 owl-bot-staging/v1/.coveragerc delete mode 100644 owl-bot-staging/v1/MANIFEST.in delete mode 100644 owl-bot-staging/v1/README.rst delete mode 100644 owl-bot-staging/v1/docs/conf.py delete mode 100644 owl-bot-staging/v1/docs/dataproc_v1/autoscaling_policy_service.rst delete mode 100644 owl-bot-staging/v1/docs/dataproc_v1/cluster_controller.rst delete mode 100644 owl-bot-staging/v1/docs/dataproc_v1/job_controller.rst delete mode 100644 owl-bot-staging/v1/docs/dataproc_v1/services.rst delete mode 100644 owl-bot-staging/v1/docs/dataproc_v1/types.rst delete mode 100644 owl-bot-staging/v1/docs/dataproc_v1/workflow_template_service.rst delete mode 100644 owl-bot-staging/v1/docs/index.rst delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/client.py delete mode 100644 
owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/autoscaling_policies.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/clusters.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/jobs.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/operations.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/shared.py delete mode 100644 owl-bot-staging/v1/google/cloud/dataproc_v1/types/workflow_templates.py delete mode 100644 owl-bot-staging/v1/mypy.ini delete mode 100644 owl-bot-staging/v1/noxfile.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_dataproc_v1_keywords.py delete mode 100644 owl-bot-staging/v1/setup.py delete mode 100644 owl-bot-staging/v1/tests/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_cluster_controller.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_job_controller.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py delete mode 100644 owl-bot-staging/v1beta2/.coveragerc delete mode 100644 owl-bot-staging/v1beta2/MANIFEST.in delete mode 100644 owl-bot-staging/v1beta2/README.rst delete mode 100644 owl-bot-staging/v1beta2/docs/conf.py delete mode 100644 owl-bot-staging/v1beta2/docs/dataproc_v1beta2/autoscaling_policy_service.rst delete mode 100644 owl-bot-staging/v1beta2/docs/dataproc_v1beta2/cluster_controller.rst delete mode 100644 owl-bot-staging/v1beta2/docs/dataproc_v1beta2/job_controller.rst delete mode 100644 owl-bot-staging/v1beta2/docs/dataproc_v1beta2/services.rst delete mode 100644 owl-bot-staging/v1beta2/docs/dataproc_v1beta2/types.rst delete mode 100644 
owl-bot-staging/v1beta2/docs/dataproc_v1beta2/workflow_template_service.rst delete mode 100644 owl-bot-staging/v1beta2/docs/index.rst delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc/py.typed delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/py.typed delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/client.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py delete mode 100644 
owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/__init__.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/clusters.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/jobs.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/operations.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/shared.py delete mode 100644 owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/workflow_templates.py delete mode 100644 owl-bot-staging/v1beta2/mypy.ini delete mode 100644 owl-bot-staging/v1beta2/noxfile.py delete mode 100644 owl-bot-staging/v1beta2/scripts/fixup_dataproc_v1beta2_keywords.py delete mode 100644 owl-bot-staging/v1beta2/setup.py delete mode 100644 owl-bot-staging/v1beta2/tests/__init__.py delete mode 100644 owl-bot-staging/v1beta2/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/__init__.py delete mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py delete mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py delete mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py delete mode 100644 owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py index 7f96fc08..da99b6a8 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py @@ -97,7 +97,7 @@ def __init__( scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. - self._scopes = scopes or self.AUTH_SCOPES + self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. 
diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py index 83eceddb..e55dcb1d 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py @@ -59,6 +59,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -99,6 +100,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -151,7 +154,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py index 87aff41d..654f7eee 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py @@ -105,6 +105,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -146,6 +147,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -197,7 +200,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py index 2b10531a..39261518 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py @@ -98,7 +98,7 @@ def __init__( scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. - self._scopes = scopes or self.AUTH_SCOPES + self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. 
diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py index fce4826d..8e063266 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py @@ -60,6 +60,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -100,6 +101,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -153,7 +156,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py index d4f98222..c9d6a6a1 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py @@ -106,6 +106,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -147,6 +148,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -199,7 +202,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/base.py b/google/cloud/dataproc_v1/services/job_controller/transports/base.py index 82a32134..306acbef 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/base.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/base.py @@ -99,7 +99,7 @@ def __init__( scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. - self._scopes = scopes or self.AUTH_SCOPES + self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. 
diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py index e5a56908..0215e3ba 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py @@ -60,6 +60,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -100,6 +101,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -153,7 +156,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py index 99e7c6f7..5a58094b 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py @@ -106,6 +106,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -147,6 +148,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -199,7 +202,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py index 6959aacd..4c59aa97 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py @@ -99,7 +99,7 @@ def __init__( scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. - self._scopes = scopes or self.AUTH_SCOPES + self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. 
diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py index 4f988f6d..9dc3315b 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py @@ -61,6 +61,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -101,6 +102,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -154,7 +157,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py index 702339f9..5d4b23c7 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py @@ -107,6 +107,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -148,6 +149,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -200,7 +203,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py index 85069224..76d9df69 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py @@ -97,7 +97,7 @@ def __init__( scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. - self._scopes = scopes or self.AUTH_SCOPES + self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. 
diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py index a746d386..84a559a2 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py @@ -59,6 +59,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -99,6 +100,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -151,7 +154,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py index 03a1269f..eef777f7 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py @@ -105,6 +105,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -146,6 +147,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -197,7 +200,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py index 9b4130cc..b09b97d4 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py @@ -98,7 +98,7 @@ def __init__( scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. - self._scopes = scopes or self.AUTH_SCOPES + self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. 
diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py index 802de6e7..af785845 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py @@ -60,6 +60,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -100,6 +101,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -153,7 +156,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py index 240d4ba9..30e4f09f 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py @@ -106,6 +106,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -147,6 +148,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -199,7 +202,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py index c6eeeca0..7cd90444 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py @@ -99,7 +99,7 @@ def __init__( scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. - self._scopes = scopes or self.AUTH_SCOPES + self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. 
diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py index 4a9c30e4..28ca9391 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py @@ -60,6 +60,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -100,6 +101,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -153,7 +156,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py index 40858b9a..1afaab05 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py @@ -106,6 +106,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -147,6 +148,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -199,7 +202,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py index 4536a376..4e61207f 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py @@ -99,7 +99,7 @@ def __init__( scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. - self._scopes = scopes or self.AUTH_SCOPES + self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. 
diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py index 3b8cc785..fba1d0f9 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py @@ -61,6 +61,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -101,6 +102,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -154,7 +157,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py index 7b14912a..65666072 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py @@ -107,6 +107,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -148,6 +149,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -200,7 +203,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/owl-bot-staging/v1/.coveragerc b/owl-bot-staging/v1/.coveragerc deleted file mode 100644 index 240638d1..00000000 --- a/owl-bot-staging/v1/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/dataproc/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. 
- except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1/MANIFEST.in b/owl-bot-staging/v1/MANIFEST.in deleted file mode 100644 index 425f6657..00000000 --- a/owl-bot-staging/v1/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/dataproc *.py -recursive-include google/cloud/dataproc_v1 *.py diff --git a/owl-bot-staging/v1/README.rst b/owl-bot-staging/v1/README.rst deleted file mode 100644 index b751dfd9..00000000 --- a/owl-bot-staging/v1/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Dataproc API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Dataproc API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv - source /bin/activate - /bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv - \Scripts\activate - \Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/docs/conf.py b/owl-bot-staging/v1/docs/conf.py deleted file mode 100644 index 02417582..00000000 --- a/owl-bot-staging/v1/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-dataproc documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-dataproc" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. 
-html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. 
-# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-dataproc-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-dataproc.tex", - u"google-cloud-dataproc Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-dataproc", - u"Google Cloud Dataproc Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-dataproc", - u"google-cloud-dataproc Documentation", - author, - "google-cloud-dataproc", - "GAPIC library for Google Cloud Dataproc API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/docs/dataproc_v1/autoscaling_policy_service.rst b/owl-bot-staging/v1/docs/dataproc_v1/autoscaling_policy_service.rst deleted file mode 100644 index 9b885c57..00000000 --- a/owl-bot-staging/v1/docs/dataproc_v1/autoscaling_policy_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -AutoscalingPolicyService ------------------------------------------- - -.. automodule:: google.cloud.dataproc_v1.services.autoscaling_policy_service - :members: - :inherited-members: - -.. automodule:: google.cloud.dataproc_v1.services.autoscaling_policy_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/dataproc_v1/cluster_controller.rst b/owl-bot-staging/v1/docs/dataproc_v1/cluster_controller.rst deleted file mode 100644 index d9b7f2ad..00000000 --- a/owl-bot-staging/v1/docs/dataproc_v1/cluster_controller.rst +++ /dev/null @@ -1,10 +0,0 @@ -ClusterController ------------------------------------ - -.. automodule:: google.cloud.dataproc_v1.services.cluster_controller - :members: - :inherited-members: - -.. automodule:: google.cloud.dataproc_v1.services.cluster_controller.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/dataproc_v1/job_controller.rst b/owl-bot-staging/v1/docs/dataproc_v1/job_controller.rst deleted file mode 100644 index 5f14863b..00000000 --- a/owl-bot-staging/v1/docs/dataproc_v1/job_controller.rst +++ /dev/null @@ -1,10 +0,0 @@ -JobController -------------------------------- - -.. automodule:: google.cloud.dataproc_v1.services.job_controller - :members: - :inherited-members: - -.. automodule:: google.cloud.dataproc_v1.services.job_controller.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/dataproc_v1/services.rst b/owl-bot-staging/v1/docs/dataproc_v1/services.rst deleted file mode 100644 index 9d91e7ce..00000000 --- a/owl-bot-staging/v1/docs/dataproc_v1/services.rst +++ /dev/null @@ -1,9 +0,0 @@ -Services for Google Cloud Dataproc v1 API -========================================= -.. toctree:: - :maxdepth: 2 - - autoscaling_policy_service - cluster_controller - job_controller - workflow_template_service diff --git a/owl-bot-staging/v1/docs/dataproc_v1/types.rst b/owl-bot-staging/v1/docs/dataproc_v1/types.rst deleted file mode 100644 index bc1a0a30..00000000 --- a/owl-bot-staging/v1/docs/dataproc_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Dataproc v1 API -====================================== - -.. 
automodule:: google.cloud.dataproc_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/docs/dataproc_v1/workflow_template_service.rst b/owl-bot-staging/v1/docs/dataproc_v1/workflow_template_service.rst deleted file mode 100644 index 0f301cee..00000000 --- a/owl-bot-staging/v1/docs/dataproc_v1/workflow_template_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -WorkflowTemplateService ------------------------------------------ - -.. automodule:: google.cloud.dataproc_v1.services.workflow_template_service - :members: - :inherited-members: - -.. automodule:: google.cloud.dataproc_v1.services.workflow_template_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/index.rst b/owl-bot-staging/v1/docs/index.rst deleted file mode 100644 index 3bf4df8b..00000000 --- a/owl-bot-staging/v1/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - dataproc_v1/services - dataproc_v1/types diff --git a/owl-bot-staging/v1/google/cloud/dataproc/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc/__init__.py deleted file mode 100644 index 4ef0034e..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc/__init__.py +++ /dev/null @@ -1,217 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from google.cloud.dataproc_v1.services.autoscaling_policy_service.client import AutoscalingPolicyServiceClient -from google.cloud.dataproc_v1.services.autoscaling_policy_service.async_client import AutoscalingPolicyServiceAsyncClient -from google.cloud.dataproc_v1.services.cluster_controller.client import ClusterControllerClient -from google.cloud.dataproc_v1.services.cluster_controller.async_client import ClusterControllerAsyncClient -from google.cloud.dataproc_v1.services.job_controller.client import JobControllerClient -from google.cloud.dataproc_v1.services.job_controller.async_client import JobControllerAsyncClient -from google.cloud.dataproc_v1.services.workflow_template_service.client import WorkflowTemplateServiceClient -from google.cloud.dataproc_v1.services.workflow_template_service.async_client import WorkflowTemplateServiceAsyncClient - -from google.cloud.dataproc_v1.types.autoscaling_policies import AutoscalingPolicy -from google.cloud.dataproc_v1.types.autoscaling_policies import BasicAutoscalingAlgorithm -from google.cloud.dataproc_v1.types.autoscaling_policies import BasicYarnAutoscalingConfig -from google.cloud.dataproc_v1.types.autoscaling_policies import CreateAutoscalingPolicyRequest -from google.cloud.dataproc_v1.types.autoscaling_policies import DeleteAutoscalingPolicyRequest -from google.cloud.dataproc_v1.types.autoscaling_policies import GetAutoscalingPolicyRequest -from google.cloud.dataproc_v1.types.autoscaling_policies import InstanceGroupAutoscalingPolicyConfig -from google.cloud.dataproc_v1.types.autoscaling_policies import ListAutoscalingPoliciesRequest -from google.cloud.dataproc_v1.types.autoscaling_policies import ListAutoscalingPoliciesResponse -from google.cloud.dataproc_v1.types.autoscaling_policies import UpdateAutoscalingPolicyRequest -from google.cloud.dataproc_v1.types.clusters import AcceleratorConfig -from google.cloud.dataproc_v1.types.clusters import AutoscalingConfig -from google.cloud.dataproc_v1.types.clusters import Cluster -from google.cloud.dataproc_v1.types.clusters import ClusterConfig -from google.cloud.dataproc_v1.types.clusters import ClusterMetrics -from google.cloud.dataproc_v1.types.clusters import ClusterStatus -from google.cloud.dataproc_v1.types.clusters import CreateClusterRequest -from google.cloud.dataproc_v1.types.clusters import DeleteClusterRequest -from google.cloud.dataproc_v1.types.clusters import DiagnoseClusterRequest -from google.cloud.dataproc_v1.types.clusters import DiagnoseClusterResults -from google.cloud.dataproc_v1.types.clusters import DiskConfig -from google.cloud.dataproc_v1.types.clusters import EncryptionConfig -from google.cloud.dataproc_v1.types.clusters import EndpointConfig -from google.cloud.dataproc_v1.types.clusters import GceClusterConfig -from google.cloud.dataproc_v1.types.clusters import GetClusterRequest -from google.cloud.dataproc_v1.types.clusters import GkeClusterConfig -from google.cloud.dataproc_v1.types.clusters import IdentityConfig -from google.cloud.dataproc_v1.types.clusters import InstanceGroupConfig -from google.cloud.dataproc_v1.types.clusters import KerberosConfig -from google.cloud.dataproc_v1.types.clusters import LifecycleConfig -from google.cloud.dataproc_v1.types.clusters import ListClustersRequest -from google.cloud.dataproc_v1.types.clusters import ListClustersResponse -from google.cloud.dataproc_v1.types.clusters import ManagedGroupConfig -from google.cloud.dataproc_v1.types.clusters import MetastoreConfig -from google.cloud.dataproc_v1.types.clusters import 
NodeGroupAffinity -from google.cloud.dataproc_v1.types.clusters import NodeInitializationAction -from google.cloud.dataproc_v1.types.clusters import ReservationAffinity -from google.cloud.dataproc_v1.types.clusters import SecurityConfig -from google.cloud.dataproc_v1.types.clusters import ShieldedInstanceConfig -from google.cloud.dataproc_v1.types.clusters import SoftwareConfig -from google.cloud.dataproc_v1.types.clusters import StartClusterRequest -from google.cloud.dataproc_v1.types.clusters import StopClusterRequest -from google.cloud.dataproc_v1.types.clusters import UpdateClusterRequest -from google.cloud.dataproc_v1.types.jobs import CancelJobRequest -from google.cloud.dataproc_v1.types.jobs import DeleteJobRequest -from google.cloud.dataproc_v1.types.jobs import GetJobRequest -from google.cloud.dataproc_v1.types.jobs import HadoopJob -from google.cloud.dataproc_v1.types.jobs import HiveJob -from google.cloud.dataproc_v1.types.jobs import Job -from google.cloud.dataproc_v1.types.jobs import JobMetadata -from google.cloud.dataproc_v1.types.jobs import JobPlacement -from google.cloud.dataproc_v1.types.jobs import JobReference -from google.cloud.dataproc_v1.types.jobs import JobScheduling -from google.cloud.dataproc_v1.types.jobs import JobStatus -from google.cloud.dataproc_v1.types.jobs import ListJobsRequest -from google.cloud.dataproc_v1.types.jobs import ListJobsResponse -from google.cloud.dataproc_v1.types.jobs import LoggingConfig -from google.cloud.dataproc_v1.types.jobs import PigJob -from google.cloud.dataproc_v1.types.jobs import PrestoJob -from google.cloud.dataproc_v1.types.jobs import PySparkJob -from google.cloud.dataproc_v1.types.jobs import QueryList -from google.cloud.dataproc_v1.types.jobs import SparkJob -from google.cloud.dataproc_v1.types.jobs import SparkRJob -from google.cloud.dataproc_v1.types.jobs import SparkSqlJob -from google.cloud.dataproc_v1.types.jobs import SubmitJobRequest -from google.cloud.dataproc_v1.types.jobs import UpdateJobRequest -from google.cloud.dataproc_v1.types.jobs import YarnApplication -from google.cloud.dataproc_v1.types.operations import ClusterOperationMetadata -from google.cloud.dataproc_v1.types.operations import ClusterOperationStatus -from google.cloud.dataproc_v1.types.shared import Component -from google.cloud.dataproc_v1.types.workflow_templates import ClusterOperation -from google.cloud.dataproc_v1.types.workflow_templates import ClusterSelector -from google.cloud.dataproc_v1.types.workflow_templates import CreateWorkflowTemplateRequest -from google.cloud.dataproc_v1.types.workflow_templates import DeleteWorkflowTemplateRequest -from google.cloud.dataproc_v1.types.workflow_templates import GetWorkflowTemplateRequest -from google.cloud.dataproc_v1.types.workflow_templates import InstantiateInlineWorkflowTemplateRequest -from google.cloud.dataproc_v1.types.workflow_templates import InstantiateWorkflowTemplateRequest -from google.cloud.dataproc_v1.types.workflow_templates import ListWorkflowTemplatesRequest -from google.cloud.dataproc_v1.types.workflow_templates import ListWorkflowTemplatesResponse -from google.cloud.dataproc_v1.types.workflow_templates import ManagedCluster -from google.cloud.dataproc_v1.types.workflow_templates import OrderedJob -from google.cloud.dataproc_v1.types.workflow_templates import ParameterValidation -from google.cloud.dataproc_v1.types.workflow_templates import RegexValidation -from google.cloud.dataproc_v1.types.workflow_templates import TemplateParameter -from 
google.cloud.dataproc_v1.types.workflow_templates import UpdateWorkflowTemplateRequest -from google.cloud.dataproc_v1.types.workflow_templates import ValueValidation -from google.cloud.dataproc_v1.types.workflow_templates import WorkflowGraph -from google.cloud.dataproc_v1.types.workflow_templates import WorkflowMetadata -from google.cloud.dataproc_v1.types.workflow_templates import WorkflowNode -from google.cloud.dataproc_v1.types.workflow_templates import WorkflowTemplate -from google.cloud.dataproc_v1.types.workflow_templates import WorkflowTemplatePlacement - -__all__ = ('AutoscalingPolicyServiceClient', - 'AutoscalingPolicyServiceAsyncClient', - 'ClusterControllerClient', - 'ClusterControllerAsyncClient', - 'JobControllerClient', - 'JobControllerAsyncClient', - 'WorkflowTemplateServiceClient', - 'WorkflowTemplateServiceAsyncClient', - 'AutoscalingPolicy', - 'BasicAutoscalingAlgorithm', - 'BasicYarnAutoscalingConfig', - 'CreateAutoscalingPolicyRequest', - 'DeleteAutoscalingPolicyRequest', - 'GetAutoscalingPolicyRequest', - 'InstanceGroupAutoscalingPolicyConfig', - 'ListAutoscalingPoliciesRequest', - 'ListAutoscalingPoliciesResponse', - 'UpdateAutoscalingPolicyRequest', - 'AcceleratorConfig', - 'AutoscalingConfig', - 'Cluster', - 'ClusterConfig', - 'ClusterMetrics', - 'ClusterStatus', - 'CreateClusterRequest', - 'DeleteClusterRequest', - 'DiagnoseClusterRequest', - 'DiagnoseClusterResults', - 'DiskConfig', - 'EncryptionConfig', - 'EndpointConfig', - 'GceClusterConfig', - 'GetClusterRequest', - 'GkeClusterConfig', - 'IdentityConfig', - 'InstanceGroupConfig', - 'KerberosConfig', - 'LifecycleConfig', - 'ListClustersRequest', - 'ListClustersResponse', - 'ManagedGroupConfig', - 'MetastoreConfig', - 'NodeGroupAffinity', - 'NodeInitializationAction', - 'ReservationAffinity', - 'SecurityConfig', - 'ShieldedInstanceConfig', - 'SoftwareConfig', - 'StartClusterRequest', - 'StopClusterRequest', - 'UpdateClusterRequest', - 'CancelJobRequest', - 'DeleteJobRequest', - 'GetJobRequest', - 'HadoopJob', - 'HiveJob', - 'Job', - 'JobMetadata', - 'JobPlacement', - 'JobReference', - 'JobScheduling', - 'JobStatus', - 'ListJobsRequest', - 'ListJobsResponse', - 'LoggingConfig', - 'PigJob', - 'PrestoJob', - 'PySparkJob', - 'QueryList', - 'SparkJob', - 'SparkRJob', - 'SparkSqlJob', - 'SubmitJobRequest', - 'UpdateJobRequest', - 'YarnApplication', - 'ClusterOperationMetadata', - 'ClusterOperationStatus', - 'Component', - 'ClusterOperation', - 'ClusterSelector', - 'CreateWorkflowTemplateRequest', - 'DeleteWorkflowTemplateRequest', - 'GetWorkflowTemplateRequest', - 'InstantiateInlineWorkflowTemplateRequest', - 'InstantiateWorkflowTemplateRequest', - 'ListWorkflowTemplatesRequest', - 'ListWorkflowTemplatesResponse', - 'ManagedCluster', - 'OrderedJob', - 'ParameterValidation', - 'RegexValidation', - 'TemplateParameter', - 'UpdateWorkflowTemplateRequest', - 'ValueValidation', - 'WorkflowGraph', - 'WorkflowMetadata', - 'WorkflowNode', - 'WorkflowTemplate', - 'WorkflowTemplatePlacement', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc/py.typed b/owl-bot-staging/v1/google/cloud/dataproc/py.typed deleted file mode 100644 index aac99cba..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-dataproc package uses inline types. 
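The versionless google.cloud.dataproc package removed above simply re-exports every client and message type from google.cloud.dataproc_v1, so application code can import from either namespace. A hedged usage sketch, assuming placeholder project and region values:

from google.cloud.dataproc import AutoscalingPolicyServiceClient, ListAutoscalingPoliciesRequest

client = AutoscalingPolicyServiceClient()
request = ListAutoscalingPoliciesRequest(
    parent="projects/my-project/regions/us-central1",  # placeholder parent
)
for policy in client.list_autoscaling_policies(request=request):
    print(policy.name)  # the pager resolves additional pages transparently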
diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/__init__.py deleted file mode 100644 index 278cb5d9..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/__init__.py +++ /dev/null @@ -1,218 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from .services.autoscaling_policy_service import AutoscalingPolicyServiceClient -from .services.autoscaling_policy_service import AutoscalingPolicyServiceAsyncClient -from .services.cluster_controller import ClusterControllerClient -from .services.cluster_controller import ClusterControllerAsyncClient -from .services.job_controller import JobControllerClient -from .services.job_controller import JobControllerAsyncClient -from .services.workflow_template_service import WorkflowTemplateServiceClient -from .services.workflow_template_service import WorkflowTemplateServiceAsyncClient - -from .types.autoscaling_policies import AutoscalingPolicy -from .types.autoscaling_policies import BasicAutoscalingAlgorithm -from .types.autoscaling_policies import BasicYarnAutoscalingConfig -from .types.autoscaling_policies import CreateAutoscalingPolicyRequest -from .types.autoscaling_policies import DeleteAutoscalingPolicyRequest -from .types.autoscaling_policies import GetAutoscalingPolicyRequest -from .types.autoscaling_policies import InstanceGroupAutoscalingPolicyConfig -from .types.autoscaling_policies import ListAutoscalingPoliciesRequest -from .types.autoscaling_policies import ListAutoscalingPoliciesResponse -from .types.autoscaling_policies import UpdateAutoscalingPolicyRequest -from .types.clusters import AcceleratorConfig -from .types.clusters import AutoscalingConfig -from .types.clusters import Cluster -from .types.clusters import ClusterConfig -from .types.clusters import ClusterMetrics -from .types.clusters import ClusterStatus -from .types.clusters import CreateClusterRequest -from .types.clusters import DeleteClusterRequest -from .types.clusters import DiagnoseClusterRequest -from .types.clusters import DiagnoseClusterResults -from .types.clusters import DiskConfig -from .types.clusters import EncryptionConfig -from .types.clusters import EndpointConfig -from .types.clusters import GceClusterConfig -from .types.clusters import GetClusterRequest -from .types.clusters import GkeClusterConfig -from .types.clusters import IdentityConfig -from .types.clusters import InstanceGroupConfig -from .types.clusters import KerberosConfig -from .types.clusters import LifecycleConfig -from .types.clusters import ListClustersRequest -from .types.clusters import ListClustersResponse -from .types.clusters import ManagedGroupConfig -from .types.clusters import MetastoreConfig -from .types.clusters import NodeGroupAffinity -from .types.clusters import NodeInitializationAction -from .types.clusters import ReservationAffinity -from .types.clusters import SecurityConfig -from .types.clusters import ShieldedInstanceConfig -from .types.clusters 
import SoftwareConfig -from .types.clusters import StartClusterRequest -from .types.clusters import StopClusterRequest -from .types.clusters import UpdateClusterRequest -from .types.jobs import CancelJobRequest -from .types.jobs import DeleteJobRequest -from .types.jobs import GetJobRequest -from .types.jobs import HadoopJob -from .types.jobs import HiveJob -from .types.jobs import Job -from .types.jobs import JobMetadata -from .types.jobs import JobPlacement -from .types.jobs import JobReference -from .types.jobs import JobScheduling -from .types.jobs import JobStatus -from .types.jobs import ListJobsRequest -from .types.jobs import ListJobsResponse -from .types.jobs import LoggingConfig -from .types.jobs import PigJob -from .types.jobs import PrestoJob -from .types.jobs import PySparkJob -from .types.jobs import QueryList -from .types.jobs import SparkJob -from .types.jobs import SparkRJob -from .types.jobs import SparkSqlJob -from .types.jobs import SubmitJobRequest -from .types.jobs import UpdateJobRequest -from .types.jobs import YarnApplication -from .types.operations import ClusterOperationMetadata -from .types.operations import ClusterOperationStatus -from .types.shared import Component -from .types.workflow_templates import ClusterOperation -from .types.workflow_templates import ClusterSelector -from .types.workflow_templates import CreateWorkflowTemplateRequest -from .types.workflow_templates import DeleteWorkflowTemplateRequest -from .types.workflow_templates import GetWorkflowTemplateRequest -from .types.workflow_templates import InstantiateInlineWorkflowTemplateRequest -from .types.workflow_templates import InstantiateWorkflowTemplateRequest -from .types.workflow_templates import ListWorkflowTemplatesRequest -from .types.workflow_templates import ListWorkflowTemplatesResponse -from .types.workflow_templates import ManagedCluster -from .types.workflow_templates import OrderedJob -from .types.workflow_templates import ParameterValidation -from .types.workflow_templates import RegexValidation -from .types.workflow_templates import TemplateParameter -from .types.workflow_templates import UpdateWorkflowTemplateRequest -from .types.workflow_templates import ValueValidation -from .types.workflow_templates import WorkflowGraph -from .types.workflow_templates import WorkflowMetadata -from .types.workflow_templates import WorkflowNode -from .types.workflow_templates import WorkflowTemplate -from .types.workflow_templates import WorkflowTemplatePlacement - -__all__ = ( - 'AutoscalingPolicyServiceAsyncClient', - 'ClusterControllerAsyncClient', - 'JobControllerAsyncClient', - 'WorkflowTemplateServiceAsyncClient', -'AcceleratorConfig', -'AutoscalingConfig', -'AutoscalingPolicy', -'AutoscalingPolicyServiceClient', -'BasicAutoscalingAlgorithm', -'BasicYarnAutoscalingConfig', -'CancelJobRequest', -'Cluster', -'ClusterConfig', -'ClusterControllerClient', -'ClusterMetrics', -'ClusterOperation', -'ClusterOperationMetadata', -'ClusterOperationStatus', -'ClusterSelector', -'ClusterStatus', -'Component', -'CreateAutoscalingPolicyRequest', -'CreateClusterRequest', -'CreateWorkflowTemplateRequest', -'DeleteAutoscalingPolicyRequest', -'DeleteClusterRequest', -'DeleteJobRequest', -'DeleteWorkflowTemplateRequest', -'DiagnoseClusterRequest', -'DiagnoseClusterResults', -'DiskConfig', -'EncryptionConfig', -'EndpointConfig', -'GceClusterConfig', -'GetAutoscalingPolicyRequest', -'GetClusterRequest', -'GetJobRequest', -'GetWorkflowTemplateRequest', -'GkeClusterConfig', -'HadoopJob', -'HiveJob', 
-'IdentityConfig', -'InstanceGroupAutoscalingPolicyConfig', -'InstanceGroupConfig', -'InstantiateInlineWorkflowTemplateRequest', -'InstantiateWorkflowTemplateRequest', -'Job', -'JobControllerClient', -'JobMetadata', -'JobPlacement', -'JobReference', -'JobScheduling', -'JobStatus', -'KerberosConfig', -'LifecycleConfig', -'ListAutoscalingPoliciesRequest', -'ListAutoscalingPoliciesResponse', -'ListClustersRequest', -'ListClustersResponse', -'ListJobsRequest', -'ListJobsResponse', -'ListWorkflowTemplatesRequest', -'ListWorkflowTemplatesResponse', -'LoggingConfig', -'ManagedCluster', -'ManagedGroupConfig', -'MetastoreConfig', -'NodeGroupAffinity', -'NodeInitializationAction', -'OrderedJob', -'ParameterValidation', -'PigJob', -'PrestoJob', -'PySparkJob', -'QueryList', -'RegexValidation', -'ReservationAffinity', -'SecurityConfig', -'ShieldedInstanceConfig', -'SoftwareConfig', -'SparkJob', -'SparkRJob', -'SparkSqlJob', -'StartClusterRequest', -'StopClusterRequest', -'SubmitJobRequest', -'TemplateParameter', -'UpdateAutoscalingPolicyRequest', -'UpdateClusterRequest', -'UpdateJobRequest', -'UpdateWorkflowTemplateRequest', -'ValueValidation', -'WorkflowGraph', -'WorkflowMetadata', -'WorkflowNode', -'WorkflowTemplate', -'WorkflowTemplatePlacement', -'WorkflowTemplateServiceClient', -'YarnApplication', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/dataproc_v1/gapic_metadata.json deleted file mode 100644 index 2d068a45..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/gapic_metadata.json +++ /dev/null @@ -1,335 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.dataproc_v1", - "protoPackage": "google.cloud.dataproc.v1", - "schema": "1.0", - "services": { - "AutoscalingPolicyService": { - "clients": { - "grpc": { - "libraryClient": "AutoscalingPolicyServiceClient", - "rpcs": { - "CreateAutoscalingPolicy": { - "methods": [ - "create_autoscaling_policy" - ] - }, - "DeleteAutoscalingPolicy": { - "methods": [ - "delete_autoscaling_policy" - ] - }, - "GetAutoscalingPolicy": { - "methods": [ - "get_autoscaling_policy" - ] - }, - "ListAutoscalingPolicies": { - "methods": [ - "list_autoscaling_policies" - ] - }, - "UpdateAutoscalingPolicy": { - "methods": [ - "update_autoscaling_policy" - ] - } - } - }, - "grpc-async": { - "libraryClient": "AutoscalingPolicyServiceAsyncClient", - "rpcs": { - "CreateAutoscalingPolicy": { - "methods": [ - "create_autoscaling_policy" - ] - }, - "DeleteAutoscalingPolicy": { - "methods": [ - "delete_autoscaling_policy" - ] - }, - "GetAutoscalingPolicy": { - "methods": [ - "get_autoscaling_policy" - ] - }, - "ListAutoscalingPolicies": { - "methods": [ - "list_autoscaling_policies" - ] - }, - "UpdateAutoscalingPolicy": { - "methods": [ - "update_autoscaling_policy" - ] - } - } - } - } - }, - "ClusterController": { - "clients": { - "grpc": { - "libraryClient": "ClusterControllerClient", - "rpcs": { - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DiagnoseCluster": { - "methods": [ - "diagnose_cluster" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "StartCluster": { - "methods": [ - "start_cluster" - ] - }, - "StopCluster": { - "methods": [ - "stop_cluster" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - } - } - 
}, - "grpc-async": { - "libraryClient": "ClusterControllerAsyncClient", - "rpcs": { - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DiagnoseCluster": { - "methods": [ - "diagnose_cluster" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "StartCluster": { - "methods": [ - "start_cluster" - ] - }, - "StopCluster": { - "methods": [ - "stop_cluster" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - } - } - } - } - }, - "JobController": { - "clients": { - "grpc": { - "libraryClient": "JobControllerClient", - "rpcs": { - "CancelJob": { - "methods": [ - "cancel_job" - ] - }, - "DeleteJob": { - "methods": [ - "delete_job" - ] - }, - "GetJob": { - "methods": [ - "get_job" - ] - }, - "ListJobs": { - "methods": [ - "list_jobs" - ] - }, - "SubmitJob": { - "methods": [ - "submit_job" - ] - }, - "SubmitJobAsOperation": { - "methods": [ - "submit_job_as_operation" - ] - }, - "UpdateJob": { - "methods": [ - "update_job" - ] - } - } - }, - "grpc-async": { - "libraryClient": "JobControllerAsyncClient", - "rpcs": { - "CancelJob": { - "methods": [ - "cancel_job" - ] - }, - "DeleteJob": { - "methods": [ - "delete_job" - ] - }, - "GetJob": { - "methods": [ - "get_job" - ] - }, - "ListJobs": { - "methods": [ - "list_jobs" - ] - }, - "SubmitJob": { - "methods": [ - "submit_job" - ] - }, - "SubmitJobAsOperation": { - "methods": [ - "submit_job_as_operation" - ] - }, - "UpdateJob": { - "methods": [ - "update_job" - ] - } - } - } - } - }, - "WorkflowTemplateService": { - "clients": { - "grpc": { - "libraryClient": "WorkflowTemplateServiceClient", - "rpcs": { - "CreateWorkflowTemplate": { - "methods": [ - "create_workflow_template" - ] - }, - "DeleteWorkflowTemplate": { - "methods": [ - "delete_workflow_template" - ] - }, - "GetWorkflowTemplate": { - "methods": [ - "get_workflow_template" - ] - }, - "InstantiateInlineWorkflowTemplate": { - "methods": [ - "instantiate_inline_workflow_template" - ] - }, - "InstantiateWorkflowTemplate": { - "methods": [ - "instantiate_workflow_template" - ] - }, - "ListWorkflowTemplates": { - "methods": [ - "list_workflow_templates" - ] - }, - "UpdateWorkflowTemplate": { - "methods": [ - "update_workflow_template" - ] - } - } - }, - "grpc-async": { - "libraryClient": "WorkflowTemplateServiceAsyncClient", - "rpcs": { - "CreateWorkflowTemplate": { - "methods": [ - "create_workflow_template" - ] - }, - "DeleteWorkflowTemplate": { - "methods": [ - "delete_workflow_template" - ] - }, - "GetWorkflowTemplate": { - "methods": [ - "get_workflow_template" - ] - }, - "InstantiateInlineWorkflowTemplate": { - "methods": [ - "instantiate_inline_workflow_template" - ] - }, - "InstantiateWorkflowTemplate": { - "methods": [ - "instantiate_workflow_template" - ] - }, - "ListWorkflowTemplates": { - "methods": [ - "list_workflow_templates" - ] - }, - "UpdateWorkflowTemplate": { - "methods": [ - "update_workflow_template" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/py.typed b/owl-bot-staging/v1/google/cloud/dataproc_v1/py.typed deleted file mode 100644 index aac99cba..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-dataproc package uses inline types. 
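The gapic_metadata.json removed above records which generated client method implements each proto RPC. A hedged sketch of reading that mapping, assuming the relative file path shown below:

import json

with open("google/cloud/dataproc_v1/gapic_metadata.json") as f:
    metadata = json.load(f)

# e.g. ClusterController's gRPC client maps the CreateCluster RPC to create_cluster()
rpcs = metadata["services"]["ClusterController"]["clients"]["grpc"]["rpcs"]
print(rpcs["CreateCluster"]["methods"])  # ['create_cluster']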
diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/__init__.py deleted file mode 100644 index 4de65971..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/__init__.py deleted file mode 100644 index 2401da6f..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import AutoscalingPolicyServiceClient -from .async_client import AutoscalingPolicyServiceAsyncClient - -__all__ = ( - 'AutoscalingPolicyServiceClient', - 'AutoscalingPolicyServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py deleted file mode 100644 index 19463b1c..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py +++ /dev/null @@ -1,624 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1.services.autoscaling_policy_service import pagers -from google.cloud.dataproc_v1.types import autoscaling_policies -from .transports.base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport -from .client import AutoscalingPolicyServiceClient - - -class AutoscalingPolicyServiceAsyncClient: - """The API interface for managing autoscaling policies in the - Dataproc API. - """ - - _client: AutoscalingPolicyServiceClient - - DEFAULT_ENDPOINT = AutoscalingPolicyServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = AutoscalingPolicyServiceClient.DEFAULT_MTLS_ENDPOINT - - autoscaling_policy_path = staticmethod(AutoscalingPolicyServiceClient.autoscaling_policy_path) - parse_autoscaling_policy_path = staticmethod(AutoscalingPolicyServiceClient.parse_autoscaling_policy_path) - common_billing_account_path = staticmethod(AutoscalingPolicyServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(AutoscalingPolicyServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(AutoscalingPolicyServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_organization_path) - common_project_path = staticmethod(AutoscalingPolicyServiceClient.common_project_path) - parse_common_project_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_project_path) - common_location_path = staticmethod(AutoscalingPolicyServiceClient.common_location_path) - parse_common_location_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoscalingPolicyServiceAsyncClient: The constructed client. - """ - return AutoscalingPolicyServiceClient.from_service_account_info.__func__(AutoscalingPolicyServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoscalingPolicyServiceAsyncClient: The constructed client. 
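# Hedged sketch of the service-account factory methods documented above;
# the key-file path is a placeholder, not part of this change.
from google.cloud.dataproc_v1 import AutoscalingPolicyServiceAsyncClient

async_client = AutoscalingPolicyServiceAsyncClient.from_service_account_file(
    "/path/to/service-account.json",  # placeholder path
)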
- """ - return AutoscalingPolicyServiceClient.from_service_account_file.__func__(AutoscalingPolicyServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> AutoscalingPolicyServiceTransport: - """Returns the transport used by the client instance. - - Returns: - AutoscalingPolicyServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(AutoscalingPolicyServiceClient).get_transport_class, type(AutoscalingPolicyServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, AutoscalingPolicyServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the autoscaling policy service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.AutoscalingPolicyServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = AutoscalingPolicyServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_autoscaling_policy(self, - request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, - *, - parent: str = None, - policy: autoscaling_policies.AutoscalingPolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Creates new autoscaling policy. - - Args: - request (:class:`google.cloud.dataproc_v1.types.CreateAutoscalingPolicyRequest`): - The request object. A request to create an autoscaling - policy. - parent (:class:`str`): - Required. The "resource name" of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. 
- - - For ``projects.regions.autoscalingPolicies.create``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For - ``projects.locations.autoscalingPolicies.create``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - policy (:class:`google.cloud.dataproc_v1.types.AutoscalingPolicy`): - Required. The autoscaling policy to - create. - - This corresponds to the ``policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, policy]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if policy is not None: - request.policy = policy - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_autoscaling_policy, - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_autoscaling_policy(self, - request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, - *, - policy: autoscaling_policies.AutoscalingPolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. - - Args: - request (:class:`google.cloud.dataproc_v1.types.UpdateAutoscalingPolicyRequest`): - The request object. A request to update an autoscaling - policy. - policy (:class:`google.cloud.dataproc_v1.types.AutoscalingPolicy`): - Required. The updated autoscaling - policy. - - This corresponds to the ``policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
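# Hedged sketch of calling create_autoscaling_policy as documented above,
# from inside an async function using the client built earlier. Only the
# `parent` and `policy` arguments come from this diff; the specific policy
# fields below are assumptions about a minimal valid AutoscalingPolicy.
from google.cloud.dataproc_v1 import AutoscalingPolicy

policy = AutoscalingPolicy(
    id="small-cluster-policy",            # assumed policy id
    worker_config={"max_instances": 10},  # assumed worker bound
    basic_algorithm={
        "yarn_config": {
            "graceful_decommission_timeout": {"seconds": 3600},
            "scale_up_factor": 0.05,
            "scale_down_factor": 1.0,
        }
    },
)
created = await client.create_autoscaling_policy(
    parent="projects/my-project/regions/us-central1",  # placeholder parent
    policy=policy,
)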
- - Returns: - google.cloud.dataproc_v1.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([policy]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if policy is not None: - request.policy = policy - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_autoscaling_policy, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("policy.name", request.policy.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_autoscaling_policy(self, - request: autoscaling_policies.GetAutoscalingPolicyRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Retrieves autoscaling policy. - - Args: - request (:class:`google.cloud.dataproc_v1.types.GetAutoscalingPolicyRequest`): - The request object. A request to fetch an autoscaling - policy. - name (:class:`str`): - Required. The "resource name" of the autoscaling policy, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.get``, the - resource name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.get``, - the resource name of the policy has the following - format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
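# The default retry shown above (initial=0.1s, maximum=60s, multiplier=1.3,
# retrying DeadlineExceeded and ServiceUnavailable, 600s deadline) can be
# overridden per call. A hedged sketch with a stricter policy; the resource
# name is a placeholder.
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries

custom_retry = retries.Retry(
    initial=1.0,
    maximum=30.0,
    multiplier=2.0,
    predicate=retries.if_exception_type(core_exceptions.ServiceUnavailable),
    deadline=120.0,
)
policy = await client.get_autoscaling_policy(
    name="projects/my-project/regions/us-central1/autoscalingPolicies/my-policy",
    retry=custom_retry,
)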
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = autoscaling_policies.GetAutoscalingPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_autoscaling_policy, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_autoscaling_policies(self, - request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListAutoscalingPoliciesAsyncPager: - r"""Lists autoscaling policies in the project. - - Args: - request (:class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest`): - The request object. A request to list autoscaling - policies in a project. - parent (:class:`str`): - Required. The "resource name" of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.list``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.list``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesAsyncPager: - A response to a request to list - autoscaling policies in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_autoscaling_policies, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListAutoscalingPoliciesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_autoscaling_policy(self, - request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes an autoscaling policy. It is an error to - delete an autoscaling policy that is in use by one or - more clusters. - - Args: - request (:class:`google.cloud.dataproc_v1.types.DeleteAutoscalingPolicyRequest`): - The request object. A request to delete an autoscaling - policy. - Autoscaling policies in use by one or more clusters will - not be deleted. - name (:class:`str`): - Required. The "resource name" of the autoscaling policy, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.delete``, - the resource name of the policy has the following - format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For - ``projects.locations.autoscalingPolicies.delete``, - the resource name of the policy has the following - format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
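# Hedged sketch of consuming the async pager returned by
# list_autoscaling_policies: awaiting the call yields a
# ListAutoscalingPoliciesAsyncPager, and `async for` fetches further pages
# transparently (the parent value is a placeholder).
pager = await client.list_autoscaling_policies(
    parent="projects/my-project/regions/us-central1",
)
async for policy in pager:
    print(policy.id)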
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_autoscaling_policy, - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "AutoscalingPolicyServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py deleted file mode 100644 index 014ca1b3..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py +++ /dev/null @@ -1,790 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1.services.autoscaling_policy_service import pagers -from google.cloud.dataproc_v1.types import autoscaling_policies -from .transports.base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import AutoscalingPolicyServiceGrpcTransport -from .transports.grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport - - -class AutoscalingPolicyServiceClientMeta(type): - """Metaclass for the AutoscalingPolicyService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] - _transport_registry["grpc"] = AutoscalingPolicyServiceGrpcTransport - _transport_registry["grpc_asyncio"] = AutoscalingPolicyServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[AutoscalingPolicyServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class AutoscalingPolicyServiceClient(metaclass=AutoscalingPolicyServiceClientMeta): - """The API interface for managing autoscaling policies in the - Dataproc API. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "dataproc.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoscalingPolicyServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoscalingPolicyServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> AutoscalingPolicyServiceTransport: - """Returns the transport used by the client instance. - - Returns: - AutoscalingPolicyServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def autoscaling_policy_path(project: str,location: str,autoscaling_policy: str,) -> str: - """Returns a fully-qualified autoscaling_policy string.""" - return "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format(project=project, location=location, autoscaling_policy=autoscaling_policy, ) - - @staticmethod - def parse_autoscaling_policy_path(path: str) -> Dict[str,str]: - """Parses a autoscaling_policy path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/autoscalingPolicies/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, AutoscalingPolicyServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the autoscaling policy service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, AutoscalingPolicyServiceTransport]): The - transport to use. 
If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, AutoscalingPolicyServiceTransport): - # transport is a AutoscalingPolicyServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
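A sketch of how the endpoint selection above can be steered from calling code; the regional endpoint string is an illustrative assumption, and an explicit ``api_endpoint`` takes precedence over the environment variables.

    import os
    from google.api_core.client_options import ClientOptions
    from google.cloud import dataproc_v1

    # Explicit endpoint override (regional endpoint shown for illustration).
    client = dataproc_v1.AutoscalingPolicyServiceClient(
        client_options=ClientOptions(
            api_endpoint="us-central1-dataproc.googleapis.com:443"
        )
    )

    # Alternatively, force mTLS or plain endpoints via the environment
    # before constructing the client:
    # os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "always"  # or "never" / "auto"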
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_autoscaling_policy(self, - request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, - *, - parent: str = None, - policy: autoscaling_policies.AutoscalingPolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Creates new autoscaling policy. - - Args: - request (google.cloud.dataproc_v1.types.CreateAutoscalingPolicyRequest): - The request object. A request to create an autoscaling - policy. - parent (str): - Required. The "resource name" of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.create``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For - ``projects.locations.autoscalingPolicies.create``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): - Required. The autoscaling policy to - create. - - This corresponds to the ``policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, policy]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a autoscaling_policies.CreateAutoscalingPolicyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, autoscaling_policies.CreateAutoscalingPolicyRequest): - request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if policy is not None: - request.policy = policy - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_autoscaling_policy] - - # Certain fields should be provided within the metadata header; - # add these here. 
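When a prebuilt transport instance is passed, credentials and scopes must already live on that transport, which is what the checks above enforce; a hedged sketch assuming application default credentials are available.

    import google.auth
    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.services.autoscaling_policy_service.transports import (
        AutoscalingPolicyServiceGrpcTransport,
    )

    credentials, _ = google.auth.default()
    transport = AutoscalingPolicyServiceGrpcTransport(credentials=credentials)

    # Passing credentials= to the client as well would raise the ValueError above.
    client = dataproc_v1.AutoscalingPolicyServiceClient(transport=transport)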
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_autoscaling_policy(self, - request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, - *, - policy: autoscaling_policies.AutoscalingPolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. - - Args: - request (google.cloud.dataproc_v1.types.UpdateAutoscalingPolicyRequest): - The request object. A request to update an autoscaling - policy. - policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): - Required. The updated autoscaling - policy. - - This corresponds to the ``policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([policy]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a autoscaling_policies.UpdateAutoscalingPolicyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, autoscaling_policies.UpdateAutoscalingPolicyRequest): - request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if policy is not None: - request.policy = policy - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_autoscaling_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("policy.name", request.policy.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_autoscaling_policy(self, - request: autoscaling_policies.GetAutoscalingPolicyRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Retrieves autoscaling policy. - - Args: - request (google.cloud.dataproc_v1.types.GetAutoscalingPolicyRequest): - The request object. A request to fetch an autoscaling - policy. - name (str): - Required. 
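A sketch of the flattened-argument calls documented above; the project, region, and policy fields are placeholders, the policy is abbreviated (a real request needs a complete policy), and either ``request`` or the flattened fields may be set, never both.

    from google.cloud import dataproc_v1

    client = dataproc_v1.AutoscalingPolicyServiceClient()

    policy = dataproc_v1.AutoscalingPolicy(
        id="my-policy",
        # worker_config and basic_algorithm omitted for brevity; the service
        # requires a fully specified policy in practice.
    )

    created = client.create_autoscaling_policy(
        parent="projects/my-project/regions/us-central1",
        policy=policy,
    )

    # Updates are full replacements, so send the complete policy back.
    created.worker_config.max_instances = 10
    updated = client.update_autoscaling_policy(policy=created)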
The "resource name" of the autoscaling policy, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.get``, the - resource name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.get``, - the resource name of the policy has the following - format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a autoscaling_policies.GetAutoscalingPolicyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, autoscaling_policies.GetAutoscalingPolicyRequest): - request = autoscaling_policies.GetAutoscalingPolicyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_autoscaling_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_autoscaling_policies(self, - request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListAutoscalingPoliciesPager: - r"""Lists autoscaling policies in the project. - - Args: - request (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest): - The request object. A request to list autoscaling - policies in a project. - parent (str): - Required. The "resource name" of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. 
- - - For ``projects.regions.autoscalingPolicies.list``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.list``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesPager: - A response to a request to list - autoscaling policies in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a autoscaling_policies.ListAutoscalingPoliciesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, autoscaling_policies.ListAutoscalingPoliciesRequest): - request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_autoscaling_policies] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListAutoscalingPoliciesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_autoscaling_policy(self, - request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes an autoscaling policy. It is an error to - delete an autoscaling policy that is in use by one or - more clusters. - - Args: - request (google.cloud.dataproc_v1.types.DeleteAutoscalingPolicyRequest): - The request object. A request to delete an autoscaling - policy. - Autoscaling policies in use by one or more clusters will - not be deleted. - name (str): - Required. The "resource name" of the autoscaling policy, - as described in - https://cloud.google.com/apis/design/resource_names. 
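The pager returned above hides pagination entirely; iterating it yields AutoscalingPolicy items across pages. A short sketch with a placeholder parent:

    from google.cloud import dataproc_v1

    client = dataproc_v1.AutoscalingPolicyServiceClient()

    for policy in client.list_autoscaling_policies(
        parent="projects/my-project/locations/us-central1"
    ):
        print(policy.name)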
- - - For ``projects.regions.autoscalingPolicies.delete``, - the resource name of the policy has the following - format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For - ``projects.locations.autoscalingPolicies.delete``, - the resource name of the policy has the following - format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a autoscaling_policies.DeleteAutoscalingPolicyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, autoscaling_policies.DeleteAutoscalingPolicyRequest): - request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_autoscaling_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "AutoscalingPolicyServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py deleted file mode 100644 index 938cb5e8..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
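Deleting a policy returns nothing on success, and policies still attached to clusters are rejected by the service; a hedged sketch that wraps the call (resource name is a placeholder):

    from google.api_core import exceptions as core_exceptions
    from google.cloud import dataproc_v1

    client = dataproc_v1.AutoscalingPolicyServiceClient()
    try:
        client.delete_autoscaling_policy(
            name="projects/my-project/locations/us-central1/"
                 "autoscalingPolicies/my-policy"
        )
    except core_exceptions.GoogleAPICallError as exc:
        # Policies in use by one or more clusters cannot be deleted.
        print(f"delete failed: {exc}")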
-# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.dataproc_v1.types import autoscaling_policies - - -class ListAutoscalingPoliciesPager: - """A pager for iterating through ``list_autoscaling_policies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``policies`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListAutoscalingPolicies`` requests and continue to iterate - through the ``policies`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., autoscaling_policies.ListAutoscalingPoliciesResponse], - request: autoscaling_policies.ListAutoscalingPoliciesRequest, - response: autoscaling_policies.ListAutoscalingPoliciesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest): - The initial request object. - response (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[autoscaling_policies.AutoscalingPolicy]: - for page in self.pages: - yield from page.policies - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListAutoscalingPoliciesAsyncPager: - """A pager for iterating through ``list_autoscaling_policies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``policies`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListAutoscalingPolicies`` requests and continue to iterate - through the ``policies`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
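Beyond item iteration, the pager exposes page-level access and proxies attribute lookups to the most recent response, as the docstring above describes; a short sketch:

    from google.cloud import dataproc_v1

    client = dataproc_v1.AutoscalingPolicyServiceClient()
    pager = client.list_autoscaling_policies(
        parent="projects/my-project/locations/us-central1"
    )

    # One ListAutoscalingPoliciesResponse per page.
    for page in pager.pages:
        print(len(page.policies), page.next_page_token)

    # Attribute access falls through to the most recent response.
    print(pager.next_page_token)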
- """ - def __init__(self, - method: Callable[..., Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse]], - request: autoscaling_policies.ListAutoscalingPoliciesRequest, - response: autoscaling_policies.ListAutoscalingPoliciesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest): - The initial request object. - response (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[autoscaling_policies.AutoscalingPolicy]: - async def async_generator(): - async for page in self.pages: - for response in page.policies: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py deleted file mode 100644 index 55ea5b98..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import AutoscalingPolicyServiceTransport -from .grpc import AutoscalingPolicyServiceGrpcTransport -from .grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] -_transport_registry['grpc'] = AutoscalingPolicyServiceGrpcTransport -_transport_registry['grpc_asyncio'] = AutoscalingPolicyServiceGrpcAsyncIOTransport - -__all__ = ( - 'AutoscalingPolicyServiceTransport', - 'AutoscalingPolicyServiceGrpcTransport', - 'AutoscalingPolicyServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py deleted file mode 100644 index 0f56bce9..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py +++ /dev/null @@ -1,246 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1.types import autoscaling_policies -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-dataproc', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class AutoscalingPolicyServiceTransport(abc.ABC): - """Abstract transport class for AutoscalingPolicyService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'dataproc.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
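A sketch of the self-signed JWT handling above: when the flag is set and the credential is a service-account credential that supports it, the transport switches the credential to always use JWT access tokens (key path is a placeholder).

    from google.oauth2 import service_account
    from google.cloud.dataproc_v1.services.autoscaling_policy_service.transports import (
        AutoscalingPolicyServiceGrpcTransport,
    )

    creds = service_account.Credentials.from_service_account_file(
        "service-account.json"
    )

    transport = AutoscalingPolicyServiceGrpcTransport(
        credentials=creds,
        # Only honored when with_always_use_jwt_access is available on the
        # credential class, per the guard in the base transport above.
        always_use_jwt_access=True,
    )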
- self._wrapped_methods = { - self.create_autoscaling_policy: gapic_v1.method.wrap_method( - self.create_autoscaling_policy, - default_timeout=600.0, - client_info=client_info, - ), - self.update_autoscaling_policy: gapic_v1.method.wrap_method( - self.update_autoscaling_policy, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.get_autoscaling_policy: gapic_v1.method.wrap_method( - self.get_autoscaling_policy, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.list_autoscaling_policies: gapic_v1.method.wrap_method( - self.list_autoscaling_policies, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.delete_autoscaling_policy: gapic_v1.method.wrap_method( - self.delete_autoscaling_policy, - default_timeout=600.0, - client_info=client_info, - ), - } - - @property - def create_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.CreateAutoscalingPolicyRequest], - Union[ - autoscaling_policies.AutoscalingPolicy, - Awaitable[autoscaling_policies.AutoscalingPolicy] - ]]: - raise NotImplementedError() - - @property - def update_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.UpdateAutoscalingPolicyRequest], - Union[ - autoscaling_policies.AutoscalingPolicy, - Awaitable[autoscaling_policies.AutoscalingPolicy] - ]]: - raise NotImplementedError() - - @property - def get_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.GetAutoscalingPolicyRequest], - Union[ - autoscaling_policies.AutoscalingPolicy, - Awaitable[autoscaling_policies.AutoscalingPolicy] - ]]: - raise NotImplementedError() - - @property - def list_autoscaling_policies(self) -> Callable[ - [autoscaling_policies.ListAutoscalingPoliciesRequest], - Union[ - autoscaling_policies.ListAutoscalingPoliciesResponse, - Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.DeleteAutoscalingPolicyRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'AutoscalingPolicyServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py deleted file mode 100644 index 8803e2ab..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py +++ /dev/null @@ -1,363 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
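The wrapped methods above carry default retries (0.1 s initial, 60 s maximum, 1.3 multiplier, retrying DeadlineExceeded and ServiceUnavailable, 600 s deadline); callers can override retry and timeout per call. A sketch with placeholder values:

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries
    from google.cloud import dataproc_v1

    client = dataproc_v1.AutoscalingPolicyServiceClient()

    custom_retry = retries.Retry(
        initial=0.5,
        maximum=30.0,
        multiplier=2.0,
        deadline=120.0,
        predicate=retries.if_exception_type(core_exceptions.ServiceUnavailable),
    )

    policy = client.get_autoscaling_policy(
        name="projects/my-project/locations/us-central1/"
             "autoscalingPolicies/my-policy",
        retry=custom_retry,
        timeout=30.0,
    )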
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.dataproc_v1.types import autoscaling_policies -from google.protobuf import empty_pb2 # type: ignore -from .base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO - - -class AutoscalingPolicyServiceGrpcTransport(AutoscalingPolicyServiceTransport): - """gRPC backend transport for AutoscalingPolicyService. - - The API interface for managing autoscaling policies in the - Dataproc API. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. 
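``create_channel`` above builds a standalone gRPC channel; an existing channel can also be injected, in which case the transport ignores credentials, as noted in its constructor. A hedged sketch assuming application default credentials:

    import google.auth
    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.services.autoscaling_policy_service.transports import (
        AutoscalingPolicyServiceGrpcTransport,
    )

    credentials, _ = google.auth.default()

    # Build a channel against the default host, then hand it to the transport.
    channel = AutoscalingPolicyServiceGrpcTransport.create_channel(
        credentials=credentials,
    )
    transport = AutoscalingPolicyServiceGrpcTransport(channel=channel)
    client = dataproc_v1.AutoscalingPolicyServiceClient(transport=transport)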
- credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def create_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.CreateAutoscalingPolicyRequest], - autoscaling_policies.AutoscalingPolicy]: - r"""Return a callable for the create autoscaling policy method over gRPC. - - Creates new autoscaling policy. - - Returns: - Callable[[~.CreateAutoscalingPolicyRequest], - ~.AutoscalingPolicy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_autoscaling_policy' not in self._stubs: - self._stubs['create_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.AutoscalingPolicyService/CreateAutoscalingPolicy', - request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['create_autoscaling_policy'] - - @property - def update_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.UpdateAutoscalingPolicyRequest], - autoscaling_policies.AutoscalingPolicy]: - r"""Return a callable for the update autoscaling policy method over gRPC. - - Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. - - Returns: - Callable[[~.UpdateAutoscalingPolicyRequest], - ~.AutoscalingPolicy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_autoscaling_policy' not in self._stubs: - self._stubs['update_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.AutoscalingPolicyService/UpdateAutoscalingPolicy', - request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['update_autoscaling_policy'] - - @property - def get_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.GetAutoscalingPolicyRequest], - autoscaling_policies.AutoscalingPolicy]: - r"""Return a callable for the get autoscaling policy method over gRPC. - - Retrieves autoscaling policy. - - Returns: - Callable[[~.GetAutoscalingPolicyRequest], - ~.AutoscalingPolicy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_autoscaling_policy' not in self._stubs: - self._stubs['get_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.AutoscalingPolicyService/GetAutoscalingPolicy', - request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['get_autoscaling_policy'] - - @property - def list_autoscaling_policies(self) -> Callable[ - [autoscaling_policies.ListAutoscalingPoliciesRequest], - autoscaling_policies.ListAutoscalingPoliciesResponse]: - r"""Return a callable for the list autoscaling policies method over gRPC. - - Lists autoscaling policies in the project. - - Returns: - Callable[[~.ListAutoscalingPoliciesRequest], - ~.ListAutoscalingPoliciesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_autoscaling_policies' not in self._stubs: - self._stubs['list_autoscaling_policies'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.AutoscalingPolicyService/ListAutoscalingPolicies', - request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, - response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, - ) - return self._stubs['list_autoscaling_policies'] - - @property - def delete_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.DeleteAutoscalingPolicyRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete autoscaling policy method over gRPC. - - Deletes an autoscaling policy. It is an error to - delete an autoscaling policy that is in use by one or - more clusters. - - Returns: - Callable[[~.DeleteAutoscalingPolicyRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_autoscaling_policy' not in self._stubs: - self._stubs['delete_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.AutoscalingPolicyService/DeleteAutoscalingPolicy', - request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_autoscaling_policy'] - - -__all__ = ( - 'AutoscalingPolicyServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py deleted file mode 100644 index 65ce3c4c..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,367 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.dataproc_v1.types import autoscaling_policies -from google.protobuf import empty_pb2 # type: ignore -from .base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import AutoscalingPolicyServiceGrpcTransport - - -class AutoscalingPolicyServiceGrpcAsyncIOTransport(AutoscalingPolicyServiceTransport): - """gRPC AsyncIO backend transport for AutoscalingPolicyService. - - The API interface for managing autoscaling policies in the - Dataproc API. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. 
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def create_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.CreateAutoscalingPolicyRequest], - Awaitable[autoscaling_policies.AutoscalingPolicy]]: - r"""Return a callable for the create autoscaling policy method over gRPC. - - Creates new autoscaling policy. - - Returns: - Callable[[~.CreateAutoscalingPolicyRequest], - Awaitable[~.AutoscalingPolicy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
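# (A minimal standalone sketch of the lazy stub-caching pattern described in the
#  comment above; names are illustrative, and the real transports also pass the
#  request serializer and response deserializer shown in the surrounding diff.)
from typing import Callable, Dict

import grpc  # type: ignore


class LazyStubTransport:
    """Caches unary-unary callables in ``self._stubs`` on first access."""

    def __init__(self, channel: grpc.Channel) -> None:
        self._channel = channel
        self._stubs: Dict[str, Callable] = {}

    @property
    def create_autoscaling_policy(self) -> Callable:
        # Build the callable once; later property accesses reuse the cached stub.
        if "create_autoscaling_policy" not in self._stubs:
            self._stubs["create_autoscaling_policy"] = self._channel.unary_unary(
                "/google.cloud.dataproc.v1.AutoscalingPolicyService/CreateAutoscalingPolicy",
            )
        return self._stubs["create_autoscaling_policy"]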
- if 'create_autoscaling_policy' not in self._stubs: - self._stubs['create_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.AutoscalingPolicyService/CreateAutoscalingPolicy', - request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['create_autoscaling_policy'] - - @property - def update_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.UpdateAutoscalingPolicyRequest], - Awaitable[autoscaling_policies.AutoscalingPolicy]]: - r"""Return a callable for the update autoscaling policy method over gRPC. - - Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. - - Returns: - Callable[[~.UpdateAutoscalingPolicyRequest], - Awaitable[~.AutoscalingPolicy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_autoscaling_policy' not in self._stubs: - self._stubs['update_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.AutoscalingPolicyService/UpdateAutoscalingPolicy', - request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['update_autoscaling_policy'] - - @property - def get_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.GetAutoscalingPolicyRequest], - Awaitable[autoscaling_policies.AutoscalingPolicy]]: - r"""Return a callable for the get autoscaling policy method over gRPC. - - Retrieves autoscaling policy. - - Returns: - Callable[[~.GetAutoscalingPolicyRequest], - Awaitable[~.AutoscalingPolicy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_autoscaling_policy' not in self._stubs: - self._stubs['get_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.AutoscalingPolicyService/GetAutoscalingPolicy', - request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['get_autoscaling_policy'] - - @property - def list_autoscaling_policies(self) -> Callable[ - [autoscaling_policies.ListAutoscalingPoliciesRequest], - Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse]]: - r"""Return a callable for the list autoscaling policies method over gRPC. - - Lists autoscaling policies in the project. - - Returns: - Callable[[~.ListAutoscalingPoliciesRequest], - Awaitable[~.ListAutoscalingPoliciesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_autoscaling_policies' not in self._stubs: - self._stubs['list_autoscaling_policies'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.AutoscalingPolicyService/ListAutoscalingPolicies', - request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, - response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, - ) - return self._stubs['list_autoscaling_policies'] - - @property - def delete_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.DeleteAutoscalingPolicyRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete autoscaling policy method over gRPC. - - Deletes an autoscaling policy. It is an error to - delete an autoscaling policy that is in use by one or - more clusters. - - Returns: - Callable[[~.DeleteAutoscalingPolicyRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_autoscaling_policy' not in self._stubs: - self._stubs['delete_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.AutoscalingPolicyService/DeleteAutoscalingPolicy', - request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_autoscaling_policy'] - - -__all__ = ( - 'AutoscalingPolicyServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/__init__.py deleted file mode 100644 index 4b4a11d5..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import ClusterControllerClient -from .async_client import ClusterControllerAsyncClient - -__all__ = ( - 'ClusterControllerClient', - 'ClusterControllerAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/async_client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/async_client.py deleted file mode 100644 index 99ffb764..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/async_client.py +++ /dev/null @@ -1,1020 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1.services.cluster_controller import pagers -from google.cloud.dataproc_v1.types import clusters -from google.cloud.dataproc_v1.types import operations -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import ClusterControllerTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport -from .client import ClusterControllerClient - - -class ClusterControllerAsyncClient: - """The ClusterControllerService provides methods to manage - clusters of Compute Engine instances. - """ - - _client: ClusterControllerClient - - DEFAULT_ENDPOINT = ClusterControllerClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = ClusterControllerClient.DEFAULT_MTLS_ENDPOINT - - cluster_path = staticmethod(ClusterControllerClient.cluster_path) - parse_cluster_path = staticmethod(ClusterControllerClient.parse_cluster_path) - service_path = staticmethod(ClusterControllerClient.service_path) - parse_service_path = staticmethod(ClusterControllerClient.parse_service_path) - common_billing_account_path = staticmethod(ClusterControllerClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(ClusterControllerClient.parse_common_billing_account_path) - common_folder_path = staticmethod(ClusterControllerClient.common_folder_path) - parse_common_folder_path = staticmethod(ClusterControllerClient.parse_common_folder_path) - common_organization_path = staticmethod(ClusterControllerClient.common_organization_path) - parse_common_organization_path = staticmethod(ClusterControllerClient.parse_common_organization_path) - common_project_path = staticmethod(ClusterControllerClient.common_project_path) - parse_common_project_path = staticmethod(ClusterControllerClient.parse_common_project_path) - common_location_path = staticmethod(ClusterControllerClient.common_location_path) - parse_common_location_path = staticmethod(ClusterControllerClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterControllerAsyncClient: The constructed client. 
- """ - return ClusterControllerClient.from_service_account_info.__func__(ClusterControllerAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterControllerAsyncClient: The constructed client. - """ - return ClusterControllerClient.from_service_account_file.__func__(ClusterControllerAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ClusterControllerTransport: - """Returns the transport used by the client instance. - - Returns: - ClusterControllerTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(ClusterControllerClient).get_transport_class, type(ClusterControllerClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, ClusterControllerTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the cluster controller client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.ClusterControllerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = ClusterControllerClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_cluster(self, - request: clusters.CreateClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster: clusters.Cluster = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a cluster in a project. 
The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (:class:`google.cloud.dataproc_v1.types.CreateClusterRequest`): - The request object. A request to create a cluster. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (:class:`google.cloud.dataproc_v1.types.Cluster`): - Required. The cluster to create. - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.CreateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster is not None: - request.cluster = cluster - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - async def update_cluster(self, - request: clusters.UpdateClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - cluster: clusters.Cluster = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a cluster in a project. 
The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (:class:`google.cloud.dataproc_v1.types.UpdateClusterRequest`): - The request object. A request to update a cluster. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project the cluster belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (:class:`str`): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (:class:`google.cloud.dataproc_v1.types.Cluster`): - Required. The changes to the cluster. - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Specifies the path, relative to ``Cluster``, - of the field to update. For example, to change the - number of workers in a cluster to 5, the ``update_mask`` - parameter would be specified as - ``config.worker_config.num_instances``, and the - ``PATCH`` request body would specify the new value, as - follows: - - :: - - { - "config":{ - "workerConfig":{ - "numInstances":"5" - } - } - } - - Similarly, to change the number of preemptible workers - in a cluster to 5, the ``update_mask`` parameter would - be ``config.secondary_worker_config.num_instances``, and - the ``PATCH`` request body would be set as follows: - - :: - - { - "config":{ - "secondaryWorkerConfig":{ - "numInstances":"5" - } - } - } - - Note: Currently, only the following fields can be - updated: - - .. raw:: html - - - - - - - - - - - - - - - - - - - - - - - -
Mask                                           | Purpose
labels                                         | Update labels
config.worker_config.num_instances             | Resize primary worker group
config.secondary_worker_config.num_instances   | Resize secondary worker group
config.autoscaling_config.policy_uri           | Use, stop using, or change autoscaling policies
- - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name, cluster, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.UpdateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - if cluster is not None: - request.cluster = cluster - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - async def stop_cluster(self, - request: clusters.StopClusterRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Stops a cluster in a project. - - Args: - request (:class:`google.cloud.dataproc_v1.types.StopClusterRequest`): - The request object. A request to stop a cluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. 
- request = clusters.StopClusterRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.stop_cluster, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - async def start_cluster(self, - request: clusters.StartClusterRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Starts a cluster in a project. - - Args: - request (:class:`google.cloud.dataproc_v1.types.StartClusterRequest`): - The request object. A request to start a cluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - request = clusters.StartClusterRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.start_cluster, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - async def delete_cluster(self, - request: clusters.DeleteClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (:class:`google.cloud.dataproc_v1.types.DeleteClusterRequest`): - The request object. A request to delete a cluster. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (:class:`str`): - Required. 
The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.DeleteClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_cluster(self, - request: clusters.GetClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> clusters.Cluster: - r"""Gets the resource representation for a cluster in a - project. - - Args: - request (:class:`google.cloud.dataproc_v1.types.GetClusterRequest`): - The request object. Request to get the resource - representation for a cluster in a project. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- cluster_name (:class:`str`): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.Cluster: - Describes the identifying - information, config, and status of a - cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.GetClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_clusters(self, - request: clusters.ListClustersRequest = None, - *, - project_id: str = None, - region: str = None, - filter: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListClustersAsyncPager: - r"""Lists all regions/{region}/clusters in a project - alphabetically. - - Args: - request (:class:`google.cloud.dataproc_v1.types.ListClustersRequest`): - The request object. A request to list the clusters in a - project. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - filter (:class:`str`): - Optional. A filter constraining the clusters to list. - Filters are case-sensitive and have the following - syntax: - - field = value [AND [field = value]] ... - - where **field** is one of ``status.state``, - ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a - label key. **value** can be ``*`` to match all values. - ``status.state`` can be one of the following: - ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, - ``ERROR``, ``DELETING``, or ``UPDATING``. 
``ACTIVE`` - contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` - states. ``INACTIVE`` contains the ``DELETING`` and - ``ERROR`` states. ``clusterName`` is the name of the - cluster provided at creation time. Only the logical - ``AND`` operator is supported; space-separated items are - treated as having an implicit ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND clusterName = mycluster AND - labels.env = staging AND labels.starred = \* - - This corresponds to the ``filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.services.cluster_controller.pagers.ListClustersAsyncPager: - The list of all clusters in a - project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, filter]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.ListClustersRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if filter is not None: - request.filter = filter - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_clusters, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListClustersAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def diagnose_cluster(self, - request: clusters.DiagnoseClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Gets cluster diagnostic information. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - After the operation completes, - [Operation.response][google.longrunning.Operation.response] - contains - `DiagnoseClusterResults `__. - - Args: - request (:class:`google.cloud.dataproc_v1.types.DiagnoseClusterRequest`): - The request object. A request to collect cluster - diagnostic information. - project_id (:class:`str`): - Required. 
The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (:class:`str`): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.dataproc_v1.types.DiagnoseClusterResults` - The location of diagnostic output. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.DiagnoseClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.diagnose_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - clusters.DiagnoseClusterResults, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. 
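A minimal usage sketch for the async client defined above, assuming the google-cloud-dataproc package from this staging tree is installed; the project, region, endpoint, and machine types are placeholders:

import asyncio

from google.cloud import dataproc_v1


async def main():
    # Regional Dataproc endpoints follow the "<region>-dataproc.googleapis.com" pattern.
    client = dataproc_v1.ClusterControllerAsyncClient(
        client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
    )
    operation = await client.create_cluster(
        project_id="my-project",
        region="us-central1",
        cluster={
            "project_id": "my-project",
            "cluster_name": "my-cluster",
            "config": {
                "master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-2"},
                "worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-2"},
            },
        },
    )
    cluster = await operation.result()  # wait for the long-running operation
    print(cluster.cluster_name)


asyncio.run(main())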
- return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ClusterControllerAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/client.py deleted file mode 100644 index abf7a008..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/client.py +++ /dev/null @@ -1,1178 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1.services.cluster_controller import pagers -from google.cloud.dataproc_v1.types import clusters -from google.cloud.dataproc_v1.types import operations -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import ClusterControllerTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import ClusterControllerGrpcTransport -from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport - - -class ClusterControllerClientMeta(type): - """Metaclass for the ClusterController client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[ClusterControllerTransport]] - _transport_registry["grpc"] = ClusterControllerGrpcTransport - _transport_registry["grpc_asyncio"] = ClusterControllerGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[ClusterControllerTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. 
- """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class ClusterControllerClient(metaclass=ClusterControllerClientMeta): - """The ClusterControllerService provides methods to manage - clusters of Compute Engine instances. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "dataproc.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ClusterControllerTransport: - """Returns the transport used by the client instance. - - Returns: - ClusterControllerTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def cluster_path(project: str,location: str,cluster: str,) -> str: - """Returns a fully-qualified cluster string.""" - return "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) - - @staticmethod - def parse_cluster_path(path: str) -> Dict[str,str]: - """Parses a cluster path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/clusters/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def service_path(project: str,location: str,service: str,) -> str: - """Returns a fully-qualified service string.""" - return "projects/{project}/locations/{location}/services/{service}".format(project=project, location=location, service=service, ) - - @staticmethod - def parse_service_path(path: str) -> Dict[str,str]: - """Parses a service path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/services/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, ClusterControllerTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the cluster 
controller client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ClusterControllerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, ClusterControllerTransport): - # transport is a ClusterControllerTransport instance. 
- if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_cluster(self, - request: clusters.CreateClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster: clusters.Cluster = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Creates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (google.cloud.dataproc_v1.types.CreateClusterRequest): - The request object. A request to create a cluster. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (google.cloud.dataproc_v1.types.Cluster): - Required. The cluster to create. - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.CreateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.CreateClusterRequest): - request = clusters.CreateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster is not None: - request.cluster = cluster - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_cluster] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - def update_cluster(self, - request: clusters.UpdateClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - cluster: clusters.Cluster = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Updates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (google.cloud.dataproc_v1.types.UpdateClusterRequest): - The request object. A request to update a cluster. - project_id (str): - Required. The ID of the Google Cloud - Platform project the cluster belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (str): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (google.cloud.dataproc_v1.types.Cluster): - Required. The changes to the cluster. - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Specifies the path, relative to ``Cluster``, - of the field to update. For example, to change the - number of workers in a cluster to 5, the ``update_mask`` - parameter would be specified as - ``config.worker_config.num_instances``, and the - ``PATCH`` request body would specify the new value, as - follows: - - :: - - { - "config":{ - "workerConfig":{ - "numInstances":"5" - } - } - } - - Similarly, to change the number of preemptible workers - in a cluster to 5, the ``update_mask`` parameter would - be ``config.secondary_worker_config.num_instances``, and - the ``PATCH`` request body would be set as follows: - - :: - - { - "config":{ - "secondaryWorkerConfig":{ - "numInstances":"5" - } - } - } - - Note: Currently, only the following fields can be - updated: - - .. raw:: html - - - - - - - - - - - - - - - - - - - - - - - -
                     <table>
                       <tbody>
                       <tr><td><strong>Mask</strong></td><td><strong>Purpose</strong></td></tr>
                       <tr><td>labels</td><td>Update labels</td></tr>
                       <tr><td>config.worker_config.num_instances</td><td>Resize primary worker group</td></tr>
                       <tr><td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td></tr>
                       <tr><td>config.autoscaling_config.policy_uri</td><td>Use, stop using, or change autoscaling policies</td></tr>
                       </tbody>
                     </table>
- - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name, cluster, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.UpdateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.UpdateClusterRequest): - request = clusters.UpdateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - if cluster is not None: - request.cluster = cluster - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_cluster] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - def stop_cluster(self, - request: clusters.StopClusterRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Stops a cluster in a project. - - Args: - request (google.cloud.dataproc_v1.types.StopClusterRequest): - The request object. A request to stop a cluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a clusters.StopClusterRequest. 
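The ``update_mask`` semantics documented for ``update_cluster`` above are easiest to see in a short sketch. This is illustrative only; the project, region, and cluster name are placeholders::

    from google.cloud import dataproc_v1
    from google.protobuf import field_mask_pb2

    client = dataproc_v1.ClusterControllerClient()
    # Resize the primary worker group to 5 instances, per the mask table above.
    operation = client.update_cluster(
        project_id="my-project",
        region="us-central1",
        cluster_name="my-cluster",
        cluster=dataproc_v1.Cluster(
            config=dataproc_v1.ClusterConfig(
                worker_config=dataproc_v1.InstanceGroupConfig(num_instances=5),
            ),
        ),
        update_mask=field_mask_pb2.FieldMask(
            paths=["config.worker_config.num_instances"]
        ),
    )
    operation.result()  # Wait for the resize to finish.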
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.StopClusterRequest): - request = clusters.StopClusterRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.stop_cluster] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - def start_cluster(self, - request: clusters.StartClusterRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Starts a cluster in a project. - - Args: - request (google.cloud.dataproc_v1.types.StartClusterRequest): - The request object. A request to start a cluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a clusters.StartClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.StartClusterRequest): - request = clusters.StartClusterRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.start_cluster] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - def delete_cluster(self, - request: clusters.DeleteClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Deletes a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (google.cloud.dataproc_v1.types.DeleteClusterRequest): - The request object. A request to delete a cluster. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. 
- - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (str): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.DeleteClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.DeleteClusterRequest): - request = clusters.DeleteClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_cluster] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - def get_cluster(self, - request: clusters.GetClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> clusters.Cluster: - r"""Gets the resource representation for a cluster in a - project. - - Args: - request (google.cloud.dataproc_v1.types.GetClusterRequest): - The request object. Request to get the resource - representation for a cluster in a project. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. 
- - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (str): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.Cluster: - Describes the identifying - information, config, and status of a - cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.GetClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.GetClusterRequest): - request = clusters.GetClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_cluster] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_clusters(self, - request: clusters.ListClustersRequest = None, - *, - project_id: str = None, - region: str = None, - filter: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListClustersPager: - r"""Lists all regions/{region}/clusters in a project - alphabetically. - - Args: - request (google.cloud.dataproc_v1.types.ListClustersRequest): - The request object. A request to list the clusters in a - project. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - filter (str): - Optional. A filter constraining the clusters to list. - Filters are case-sensitive and have the following - syntax: - - field = value [AND [field = value]] ... - - where **field** is one of ``status.state``, - ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a - label key. **value** can be ``*`` to match all values. - ``status.state`` can be one of the following: - ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, - ``ERROR``, ``DELETING``, or ``UPDATING``. 
``ACTIVE`` - contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` - states. ``INACTIVE`` contains the ``DELETING`` and - ``ERROR`` states. ``clusterName`` is the name of the - cluster provided at creation time. Only the logical - ``AND`` operator is supported; space-separated items are - treated as having an implicit ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND clusterName = mycluster AND - labels.env = staging AND labels.starred = \* - - This corresponds to the ``filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.services.cluster_controller.pagers.ListClustersPager: - The list of all clusters in a - project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, filter]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.ListClustersRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.ListClustersRequest): - request = clusters.ListClustersRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if filter is not None: - request.filter = filter - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_clusters] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListClustersPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def diagnose_cluster(self, - request: clusters.DiagnoseClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Gets cluster diagnostic information. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - After the operation completes, - [Operation.response][google.longrunning.Operation.response] - contains - `DiagnoseClusterResults `__. - - Args: - request (google.cloud.dataproc_v1.types.DiagnoseClusterRequest): - The request object. A request to collect cluster - diagnostic information. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. 
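The ``list_clusters`` filter grammar described above is clearer with a concrete call. A hedged sketch, with placeholder project and region values::

    from google.cloud import dataproc_v1

    client = dataproc_v1.ClusterControllerClient()
    pager = client.list_clusters(
        project_id="my-project",
        region="us-central1",
        filter="status.state = ACTIVE AND labels.env = staging",
    )
    for cluster in pager:  # The pager fetches additional pages transparently.
        print(cluster.cluster_name, cluster.status.state)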
- - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (str): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.dataproc_v1.types.DiagnoseClusterResults` - The location of diagnostic output. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.DiagnoseClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.DiagnoseClusterRequest): - request = clusters.DiagnoseClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.diagnose_cluster] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - clusters.DiagnoseClusterResults, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ClusterControllerClient", -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/pagers.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/pagers.py deleted file mode 100644 index 9afbfb8e..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/pagers.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.dataproc_v1.types import clusters - - -class ListClustersPager: - """A pager for iterating through ``list_clusters`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1.types.ListClustersResponse` object, and - provides an ``__iter__`` method to iterate through its - ``clusters`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListClusters`` requests and continue to iterate - through the ``clusters`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1.types.ListClustersResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., clusters.ListClustersResponse], - request: clusters.ListClustersRequest, - response: clusters.ListClustersResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1.types.ListClustersRequest): - The initial request object. - response (google.cloud.dataproc_v1.types.ListClustersResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = clusters.ListClustersRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[clusters.ListClustersResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[clusters.Cluster]: - for page in self.pages: - yield from page.clusters - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListClustersAsyncPager: - """A pager for iterating through ``list_clusters`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1.types.ListClustersResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``clusters`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListClusters`` requests and continue to iterate - through the ``clusters`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1.types.ListClustersResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., Awaitable[clusters.ListClustersResponse]], - request: clusters.ListClustersRequest, - response: clusters.ListClustersResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1.types.ListClustersRequest): - The initial request object. - response (google.cloud.dataproc_v1.types.ListClustersResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = clusters.ListClustersRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[clusters.ListClustersResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[clusters.Cluster]: - async def async_generator(): - async for page in self.pages: - for response in page.clusters: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py deleted file mode 100644 index 9c44d271..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import ClusterControllerTransport -from .grpc import ClusterControllerGrpcTransport -from .grpc_asyncio import ClusterControllerGrpcAsyncIOTransport - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterControllerTransport]] -_transport_registry['grpc'] = ClusterControllerGrpcTransport -_transport_registry['grpc_asyncio'] = ClusterControllerGrpcAsyncIOTransport - -__all__ = ( - 'ClusterControllerTransport', - 'ClusterControllerGrpcTransport', - 'ClusterControllerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py deleted file mode 100644 index 6e5fd590..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py +++ /dev/null @@ -1,313 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1.types import clusters -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-dataproc', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class ClusterControllerTransport(abc.ABC): - """Abstract transport class for ClusterController.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'dataproc.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
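The method-level defaults precomputed just below (exponential-backoff retries on ``ServiceUnavailable`` with a 300-second deadline for most RPCs) can be overridden per call. A hedged sketch with placeholder identifiers::

    from google.api_core import retry as retries
    from google.api_core.exceptions import ServiceUnavailable
    from google.cloud import dataproc_v1

    client = dataproc_v1.ClusterControllerClient()
    # A custom retry policy that overrides the wrapped-method defaults for one call.
    custom_retry = retries.Retry(
        initial=0.25, maximum=30.0, multiplier=1.3,
        predicate=retries.if_exception_type(ServiceUnavailable),
        deadline=120.0,
    )
    cluster = client.get_cluster(
        project_id="my-project",       # placeholder values
        region="us-central1",
        cluster_name="my-cluster",
        retry=custom_retry,
        timeout=60.0,
    )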
- self._wrapped_methods = { - self.create_cluster: gapic_v1.method.wrap_method( - self.create_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - self.update_cluster: gapic_v1.method.wrap_method( - self.update_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - self.stop_cluster: gapic_v1.method.wrap_method( - self.stop_cluster, - default_timeout=None, - client_info=client_info, - ), - self.start_cluster: gapic_v1.method.wrap_method( - self.start_cluster, - default_timeout=None, - client_info=client_info, - ), - self.delete_cluster: gapic_v1.method.wrap_method( - self.delete_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - self.get_cluster: gapic_v1.method.wrap_method( - self.get_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - self.list_clusters: gapic_v1.method.wrap_method( - self.list_clusters, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - self.diagnose_cluster: gapic_v1.method.wrap_method( - self.diagnose_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_cluster(self) -> Callable[ - [clusters.CreateClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def update_cluster(self) -> Callable[ - [clusters.UpdateClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def stop_cluster(self) -> Callable[ - [clusters.StopClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def start_cluster(self) -> Callable[ - [clusters.StartClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_cluster(self) -> Callable[ - [clusters.DeleteClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_cluster(self) -> Callable[ - [clusters.GetClusterRequest], - Union[ - 
clusters.Cluster, - Awaitable[clusters.Cluster] - ]]: - raise NotImplementedError() - - @property - def list_clusters(self) -> Callable[ - [clusters.ListClustersRequest], - Union[ - clusters.ListClustersResponse, - Awaitable[clusters.ListClustersResponse] - ]]: - raise NotImplementedError() - - @property - def diagnose_cluster(self) -> Callable[ - [clusters.DiagnoseClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'ClusterControllerTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py deleted file mode 100644 index e176b377..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py +++ /dev/null @@ -1,472 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.dataproc_v1.types import clusters -from google.longrunning import operations_pb2 # type: ignore -from .base import ClusterControllerTransport, DEFAULT_CLIENT_INFO - - -class ClusterControllerGrpcTransport(ClusterControllerTransport): - """gRPC backend transport for ClusterController. - - The ClusterControllerService provides methods to manage - clusters of Compute Engine instances. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. 
- return self._operations_client - - @property - def create_cluster(self) -> Callable[ - [clusters.CreateClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.CreateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_cluster' not in self._stubs: - self._stubs['create_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/CreateCluster', - request_serializer=clusters.CreateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_cluster'] - - @property - def update_cluster(self) -> Callable[ - [clusters.UpdateClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the update cluster method over gRPC. - - Updates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.UpdateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_cluster' not in self._stubs: - self._stubs['update_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/UpdateCluster', - request_serializer=clusters.UpdateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_cluster'] - - @property - def stop_cluster(self) -> Callable[ - [clusters.StopClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the stop cluster method over gRPC. - - Stops a cluster in a project. - - Returns: - Callable[[~.StopClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'stop_cluster' not in self._stubs: - self._stubs['stop_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/StopCluster', - request_serializer=clusters.StopClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['stop_cluster'] - - @property - def start_cluster(self) -> Callable[ - [clusters.StartClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the start cluster method over gRPC. - - Starts a cluster in a project. - - Returns: - Callable[[~.StartClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'start_cluster' not in self._stubs: - self._stubs['start_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/StartCluster', - request_serializer=clusters.StartClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['start_cluster'] - - @property - def delete_cluster(self) -> Callable[ - [clusters.DeleteClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete cluster method over gRPC. - - Deletes a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.DeleteClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_cluster' not in self._stubs: - self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/DeleteCluster', - request_serializer=clusters.DeleteClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_cluster'] - - @property - def get_cluster(self) -> Callable[ - [clusters.GetClusterRequest], - clusters.Cluster]: - r"""Return a callable for the get cluster method over gRPC. - - Gets the resource representation for a cluster in a - project. - - Returns: - Callable[[~.GetClusterRequest], - ~.Cluster]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_cluster' not in self._stubs: - self._stubs['get_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/GetCluster', - request_serializer=clusters.GetClusterRequest.serialize, - response_deserializer=clusters.Cluster.deserialize, - ) - return self._stubs['get_cluster'] - - @property - def list_clusters(self) -> Callable[ - [clusters.ListClustersRequest], - clusters.ListClustersResponse]: - r"""Return a callable for the list clusters method over gRPC. - - Lists all regions/{region}/clusters in a project - alphabetically. - - Returns: - Callable[[~.ListClustersRequest], - ~.ListClustersResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_clusters' not in self._stubs: - self._stubs['list_clusters'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/ListClusters', - request_serializer=clusters.ListClustersRequest.serialize, - response_deserializer=clusters.ListClustersResponse.deserialize, - ) - return self._stubs['list_clusters'] - - @property - def diagnose_cluster(self) -> Callable[ - [clusters.DiagnoseClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the diagnose cluster method over gRPC. - - Gets cluster diagnostic information. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. 
- After the operation completes, - [Operation.response][google.longrunning.Operation.response] - contains - `DiagnoseClusterResults `__. - - Returns: - Callable[[~.DiagnoseClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'diagnose_cluster' not in self._stubs: - self._stubs['diagnose_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster', - request_serializer=clusters.DiagnoseClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['diagnose_cluster'] - - -__all__ = ( - 'ClusterControllerGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py deleted file mode 100644 index fc9b453b..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py +++ /dev/null @@ -1,476 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.dataproc_v1.types import clusters -from google.longrunning import operations_pb2 # type: ignore -from .base import ClusterControllerTransport, DEFAULT_CLIENT_INFO -from .grpc import ClusterControllerGrpcTransport - - -class ClusterControllerGrpcAsyncIOTransport(ClusterControllerTransport): - """gRPC AsyncIO backend transport for ClusterController. - - The ClusterControllerService provides methods to manage - clusters of Compute Engine instances. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. 
A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. 
- return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_cluster(self) -> Callable[ - [clusters.CreateClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.CreateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_cluster' not in self._stubs: - self._stubs['create_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/CreateCluster', - request_serializer=clusters.CreateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_cluster'] - - @property - def update_cluster(self) -> Callable[ - [clusters.UpdateClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update cluster method over gRPC. - - Updates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.UpdateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_cluster' not in self._stubs: - self._stubs['update_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/UpdateCluster', - request_serializer=clusters.UpdateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_cluster'] - - @property - def stop_cluster(self) -> Callable[ - [clusters.StopClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the stop cluster method over gRPC. - - Stops a cluster in a project. - - Returns: - Callable[[~.StopClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'stop_cluster' not in self._stubs: - self._stubs['stop_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/StopCluster', - request_serializer=clusters.StopClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['stop_cluster'] - - @property - def start_cluster(self) -> Callable[ - [clusters.StartClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the start cluster method over gRPC. - - Starts a cluster in a project. - - Returns: - Callable[[~.StartClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'start_cluster' not in self._stubs: - self._stubs['start_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/StartCluster', - request_serializer=clusters.StartClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['start_cluster'] - - @property - def delete_cluster(self) -> Callable[ - [clusters.DeleteClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete cluster method over gRPC. - - Deletes a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.DeleteClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_cluster' not in self._stubs: - self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/DeleteCluster', - request_serializer=clusters.DeleteClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_cluster'] - - @property - def get_cluster(self) -> Callable[ - [clusters.GetClusterRequest], - Awaitable[clusters.Cluster]]: - r"""Return a callable for the get cluster method over gRPC. - - Gets the resource representation for a cluster in a - project. - - Returns: - Callable[[~.GetClusterRequest], - Awaitable[~.Cluster]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_cluster' not in self._stubs: - self._stubs['get_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/GetCluster', - request_serializer=clusters.GetClusterRequest.serialize, - response_deserializer=clusters.Cluster.deserialize, - ) - return self._stubs['get_cluster'] - - @property - def list_clusters(self) -> Callable[ - [clusters.ListClustersRequest], - Awaitable[clusters.ListClustersResponse]]: - r"""Return a callable for the list clusters method over gRPC. - - Lists all regions/{region}/clusters in a project - alphabetically. 
- - Returns: - Callable[[~.ListClustersRequest], - Awaitable[~.ListClustersResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_clusters' not in self._stubs: - self._stubs['list_clusters'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/ListClusters', - request_serializer=clusters.ListClustersRequest.serialize, - response_deserializer=clusters.ListClustersResponse.deserialize, - ) - return self._stubs['list_clusters'] - - @property - def diagnose_cluster(self) -> Callable[ - [clusters.DiagnoseClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the diagnose cluster method over gRPC. - - Gets cluster diagnostic information. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - After the operation completes, - [Operation.response][google.longrunning.Operation.response] - contains - `DiagnoseClusterResults `__. - - Returns: - Callable[[~.DiagnoseClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'diagnose_cluster' not in self._stubs: - self._stubs['diagnose_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster', - request_serializer=clusters.DiagnoseClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['diagnose_cluster'] - - -__all__ = ( - 'ClusterControllerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/__init__.py deleted file mode 100644 index 19ac5a98..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import JobControllerClient -from .async_client import JobControllerAsyncClient - -__all__ = ( - 'JobControllerClient', - 'JobControllerAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/async_client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/async_client.py deleted file mode 100644 index 0fe3a2d0..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/async_client.py +++ /dev/null @@ -1,796 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1.services.job_controller import pagers -from google.cloud.dataproc_v1.types import jobs -from .transports.base import JobControllerTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport -from .client import JobControllerClient - - -class JobControllerAsyncClient: - """The JobController provides methods to manage jobs.""" - - _client: JobControllerClient - - DEFAULT_ENDPOINT = JobControllerClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = JobControllerClient.DEFAULT_MTLS_ENDPOINT - - common_billing_account_path = staticmethod(JobControllerClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(JobControllerClient.parse_common_billing_account_path) - common_folder_path = staticmethod(JobControllerClient.common_folder_path) - parse_common_folder_path = staticmethod(JobControllerClient.parse_common_folder_path) - common_organization_path = staticmethod(JobControllerClient.common_organization_path) - parse_common_organization_path = staticmethod(JobControllerClient.parse_common_organization_path) - common_project_path = staticmethod(JobControllerClient.common_project_path) - parse_common_project_path = staticmethod(JobControllerClient.parse_common_project_path) - common_location_path = staticmethod(JobControllerClient.common_location_path) - parse_common_location_path = staticmethod(JobControllerClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. 
- kwargs: Additional arguments to pass to the constructor. - - Returns: - JobControllerAsyncClient: The constructed client. - """ - return JobControllerClient.from_service_account_info.__func__(JobControllerAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobControllerAsyncClient: The constructed client. - """ - return JobControllerClient.from_service_account_file.__func__(JobControllerAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> JobControllerTransport: - """Returns the transport used by the client instance. - - Returns: - JobControllerTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(JobControllerClient).get_transport_class, type(JobControllerClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, JobControllerTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the job controller client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.JobControllerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - self._client = JobControllerClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def submit_job(self, - request: jobs.SubmitJobRequest = None, - *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Submits a job to a cluster. - - Args: - request (:class:`google.cloud.dataproc_v1.types.SubmitJobRequest`): - The request object. A request to submit a job. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job (:class:`google.cloud.dataproc_v1.types.Job`): - Required. The job resource. - This corresponds to the ``job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.SubmitJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job is not None: - request.job = job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.submit_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def submit_job_as_operation(self, - request: jobs.SubmitJobRequest = None, - *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Submits job to a cluster. - - Args: - request (:class:`google.cloud.dataproc_v1.types.SubmitJobRequest`): - The request object. A request to submit a job. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. 
- - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job (:class:`google.cloud.dataproc_v1.types.Job`): - Required. The job resource. - This corresponds to the ``job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.dataproc_v1.types.Job` A Dataproc - job resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.SubmitJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job is not None: - request.job = job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.submit_job_as_operation, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - jobs.Job, - metadata_type=jobs.JobMetadata, - ) - - # Done; return the response. - return response - - async def get_job(self, - request: jobs.GetJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Gets the resource representation for a job in a - project. - - Args: - request (:class:`google.cloud.dataproc_v1.types.GetJobRequest`): - The request object. A request to get the resource - representation for a job in a project. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- job_id (:class:`str`): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.GetJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_jobs(self, - request: jobs.ListJobsRequest = None, - *, - project_id: str = None, - region: str = None, - filter: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListJobsAsyncPager: - r"""Lists regions/{region}/jobs in a project. - - Args: - request (:class:`google.cloud.dataproc_v1.types.ListJobsRequest`): - The request object. A request to list jobs in a project. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - filter (:class:`str`): - Optional. A filter constraining the jobs to list. - Filters are case-sensitive and have the following - syntax: - - [field = value] AND [field [= value]] ... - - where **field** is ``status.state`` or ``labels.[KEY]``, - and ``[KEY]`` is a label key. **value** can be ``*`` to - match all values. ``status.state`` can be either - ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` - operator is supported; space-separated items are treated - as having an implicit ``AND`` operator. 
- - Example filter: - - status.state = ACTIVE AND labels.env = staging AND - labels.starred = \* - - This corresponds to the ``filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.services.job_controller.pagers.ListJobsAsyncPager: - A list of jobs in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, filter]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.ListJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if filter is not None: - request.filter = filter - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_jobs, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_job(self, - request: jobs.UpdateJobRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Updates a job in a project. - - Args: - request (:class:`google.cloud.dataproc_v1.types.UpdateJobRequest`): - The request object. A request to update a job. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - request = jobs.UpdateJobRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def cancel_job(self, - request: jobs.CancelJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Args: - request (:class:`google.cloud.dataproc_v1.types.CancelJobRequest`): - The request object. A request to cancel a job. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job_id (:class:`str`): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.CancelJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_job(self, - request: jobs.DeleteJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes the job from the project. If the job is active, the - delete fails, and the response returns ``FAILED_PRECONDITION``. 
- - Args: - request (:class:`google.cloud.dataproc_v1.types.DeleteJobRequest`): - The request object. A request to delete a job. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job_id (:class:`str`): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.DeleteJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobControllerAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/client.py deleted file mode 100644 index 42b2cc43..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/client.py +++ /dev/null @@ -1,927 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1.services.job_controller import pagers -from google.cloud.dataproc_v1.types import jobs -from .transports.base import JobControllerTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import JobControllerGrpcTransport -from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport - - -class JobControllerClientMeta(type): - """Metaclass for the JobController client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[JobControllerTransport]] - _transport_registry["grpc"] = JobControllerGrpcTransport - _transport_registry["grpc_asyncio"] = JobControllerGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[JobControllerTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class JobControllerClient(metaclass=JobControllerClientMeta): - """The JobController provides methods to manage jobs.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "dataproc.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info.
- - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> JobControllerTransport: - """Returns the transport used by the client instance. - - Returns: - JobControllerTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P<folder>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P<organization>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, JobControllerTransport, None]
= None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the job controller client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, JobControllerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. 
- # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, JobControllerTransport): - # transport is a JobControllerTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def submit_job(self, - request: jobs.SubmitJobRequest = None, - *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Submits a job to a cluster. - - Args: - request (google.cloud.dataproc_v1.types.SubmitJobRequest): - The request object. A request to submit a job. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job (google.cloud.dataproc_v1.types.Job): - Required. The job resource. - This corresponds to the ``job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.SubmitJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.SubmitJobRequest): - request = jobs.SubmitJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job is not None: - request.job = job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.submit_job] - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def submit_job_as_operation(self, - request: jobs.SubmitJobRequest = None, - *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Submits job to a cluster. - - Args: - request (google.cloud.dataproc_v1.types.SubmitJobRequest): - The request object. A request to submit a job. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job (google.cloud.dataproc_v1.types.Job): - Required. The job resource. - This corresponds to the ``job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.dataproc_v1.types.Job` A Dataproc - job resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.SubmitJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.SubmitJobRequest): - request = jobs.SubmitJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job is not None: - request.job = job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.submit_job_as_operation] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - jobs.Job, - metadata_type=jobs.JobMetadata, - ) - - # Done; return the response. - return response - - def get_job(self, - request: jobs.GetJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Gets the resource representation for a job in a - project. 
- - Args: - request (google.cloud.dataproc_v1.types.GetJobRequest): - The request object. A request to get the resource - representation for a job in a project. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job_id (str): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.GetJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.GetJobRequest): - request = jobs.GetJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_job] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_jobs(self, - request: jobs.ListJobsRequest = None, - *, - project_id: str = None, - region: str = None, - filter: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListJobsPager: - r"""Lists regions/{region}/jobs in a project. - - Args: - request (google.cloud.dataproc_v1.types.ListJobsRequest): - The request object. A request to list jobs in a project. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - filter (str): - Optional. A filter constraining the jobs to list. - Filters are case-sensitive and have the following - syntax: - - [field = value] AND [field [= value]] ... 
- - where **field** is ``status.state`` or ``labels.[KEY]``, - and ``[KEY]`` is a label key. **value** can be ``*`` to - match all values. ``status.state`` can be either - ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` - operator is supported; space-separated items are treated - as having an implicit ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND labels.env = staging AND - labels.starred = \* - - This corresponds to the ``filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.services.job_controller.pagers.ListJobsPager: - A list of jobs in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, filter]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.ListJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.ListJobsRequest): - request = jobs.ListJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if filter is not None: - request.filter = filter - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_jobs] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_job(self, - request: jobs.UpdateJobRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Updates a job in a project. - - Args: - request (google.cloud.dataproc_v1.types.UpdateJobRequest): - The request object. A request to update a job. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a jobs.UpdateJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, jobs.UpdateJobRequest): - request = jobs.UpdateJobRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_job] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def cancel_job(self, - request: jobs.CancelJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Args: - request (google.cloud.dataproc_v1.types.CancelJobRequest): - The request object. A request to cancel a job. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job_id (str): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.CancelJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.CancelJobRequest): - request = jobs.CancelJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_job] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_job(self, - request: jobs.DeleteJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes the job from the project. 
If the job is active, the - delete fails, and the response returns ``FAILED_PRECONDITION``. - - Args: - request (google.cloud.dataproc_v1.types.DeleteJobRequest): - The request object. A request to delete a job. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job_id (str): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.DeleteJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.DeleteJobRequest): - request = jobs.DeleteJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_job] - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobControllerClient", -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/pagers.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/pagers.py deleted file mode 100644 index 0143144a..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/pagers.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.dataproc_v1.types import jobs - - -class ListJobsPager: - """A pager for iterating through ``list_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1.types.ListJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListJobs`` requests and continue to iterate - through the ``jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1.types.ListJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., jobs.ListJobsResponse], - request: jobs.ListJobsRequest, - response: jobs.ListJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1.types.ListJobsRequest): - The initial request object. - response (google.cloud.dataproc_v1.types.ListJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = jobs.ListJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[jobs.ListJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[jobs.Job]: - for page in self.pages: - yield from page.jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListJobsAsyncPager: - """A pager for iterating through ``list_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1.types.ListJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListJobs`` requests and continue to iterate - through the ``jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1.types.ListJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[jobs.ListJobsResponse]], - request: jobs.ListJobsRequest, - response: jobs.ListJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1.types.ListJobsRequest): - The initial request object. - response (google.cloud.dataproc_v1.types.ListJobsResponse): - The initial response object. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = jobs.ListJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[jobs.ListJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[jobs.Job]: - async def async_generator(): - async for page in self.pages: - for response in page.jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py deleted file mode 100644 index b35119f2..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import JobControllerTransport -from .grpc import JobControllerGrpcTransport -from .grpc_asyncio import JobControllerGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[JobControllerTransport]] -_transport_registry['grpc'] = JobControllerGrpcTransport -_transport_registry['grpc_asyncio'] = JobControllerGrpcAsyncIOTransport - -__all__ = ( - 'JobControllerTransport', - 'JobControllerGrpcTransport', - 'JobControllerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/base.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/base.py deleted file mode 100644 index f9ccafad..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/base.py +++ /dev/null @@ -1,308 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
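The ``_transport_registry`` in the ``transports`` package above is what lets the client accept ``transport="grpc"`` or ``transport="grpc_asyncio"`` as a plain string and resolve it to a class. A sketch of that lookup, assuming the generated package layout:

    # Illustrative only: the registered names map to the transport classes above.
    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.services.job_controller.transports import (
        JobControllerGrpcTransport,
    )

    # get_transport_class() consults the same registry built in transports/__init__.py.
    assert (
        dataproc_v1.JobControllerClient.get_transport_class("grpc")
        is JobControllerGrpcTransport
    )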
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1.types import jobs -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-dataproc', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class JobControllerTransport(abc.ABC): - """Abstract transport class for JobController.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'dataproc.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.submit_job: gapic_v1.method.wrap_method( - self.submit_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.submit_job_as_operation: gapic_v1.method.wrap_method( - self.submit_job_as_operation, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.get_job: gapic_v1.method.wrap_method( - self.get_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.list_jobs: gapic_v1.method.wrap_method( - self.list_jobs, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.update_job: gapic_v1.method.wrap_method( - self.update_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.cancel_job: gapic_v1.method.wrap_method( - self.cancel_job, - default_retry=retries.Retry( 
-initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.delete_job: gapic_v1.method.wrap_method( - self.delete_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def submit_job(self) -> Callable[ - [jobs.SubmitJobRequest], - Union[ - jobs.Job, - Awaitable[jobs.Job] - ]]: - raise NotImplementedError() - - @property - def submit_job_as_operation(self) -> Callable[ - [jobs.SubmitJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_job(self) -> Callable[ - [jobs.GetJobRequest], - Union[ - jobs.Job, - Awaitable[jobs.Job] - ]]: - raise NotImplementedError() - - @property - def list_jobs(self) -> Callable[ - [jobs.ListJobsRequest], - Union[ - jobs.ListJobsResponse, - Awaitable[jobs.ListJobsResponse] - ]]: - raise NotImplementedError() - - @property - def update_job(self) -> Callable[ - [jobs.UpdateJobRequest], - Union[ - jobs.Job, - Awaitable[jobs.Job] - ]]: - raise NotImplementedError() - - @property - def cancel_job(self) -> Callable[ - [jobs.CancelJobRequest], - Union[ - jobs.Job, - Awaitable[jobs.Job] - ]]: - raise NotImplementedError() - - @property - def delete_job(self) -> Callable[ - [jobs.DeleteJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'JobControllerTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py deleted file mode 100644 index 8a46774c..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py +++ /dev/null @@ -1,434 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
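Every method wrapped above shares the same exponential backoff (initial 0.1 s, cap 60 s, multiplier 1.3) and a 900 s deadline and default timeout; only the retried error codes differ. ``get_job``, ``list_jobs``, and ``cancel_job`` also retry ``DEADLINE_EXCEEDED`` and ``INTERNAL``, while the mutating methods retry only ``UNAVAILABLE``. A standalone sketch of the wider policy:

    # Illustrative only: the default retry shape applied by _prep_wrapped_messages.
    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries

    read_retry = retries.Retry(
        initial=0.1,        # first backoff, in seconds
        maximum=60.0,       # backoff cap
        multiplier=1.3,     # exponential growth factor
        predicate=retries.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.InternalServerError,
            core_exceptions.ServiceUnavailable,
        ),
        deadline=900.0,     # overall time budget across all attempts
    )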
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.dataproc_v1.types import jobs -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import JobControllerTransport, DEFAULT_CLIENT_INFO - - -class JobControllerGrpcTransport(JobControllerTransport): - """gRPC backend transport for JobController. - - The JobController provides methods to manage jobs. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. 
- scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def submit_job(self) -> Callable[ - [jobs.SubmitJobRequest], - jobs.Job]: - r"""Return a callable for the submit job method over gRPC. - - Submits a job to a cluster. - - Returns: - Callable[[~.SubmitJobRequest], - ~.Job]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'submit_job' not in self._stubs: - self._stubs['submit_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/SubmitJob', - request_serializer=jobs.SubmitJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['submit_job'] - - @property - def submit_job_as_operation(self) -> Callable[ - [jobs.SubmitJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the submit job as operation method over gRPC. - - Submits job to a cluster. - - Returns: - Callable[[~.SubmitJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'submit_job_as_operation' not in self._stubs: - self._stubs['submit_job_as_operation'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/SubmitJobAsOperation', - request_serializer=jobs.SubmitJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['submit_job_as_operation'] - - @property - def get_job(self) -> Callable[ - [jobs.GetJobRequest], - jobs.Job]: - r"""Return a callable for the get job method over gRPC. - - Gets the resource representation for a job in a - project. - - Returns: - Callable[[~.GetJobRequest], - ~.Job]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_job' not in self._stubs: - self._stubs['get_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/GetJob', - request_serializer=jobs.GetJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['get_job'] - - @property - def list_jobs(self) -> Callable[ - [jobs.ListJobsRequest], - jobs.ListJobsResponse]: - r"""Return a callable for the list jobs method over gRPC. - - Lists regions/{region}/jobs in a project. - - Returns: - Callable[[~.ListJobsRequest], - ~.ListJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_jobs' not in self._stubs: - self._stubs['list_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/ListJobs', - request_serializer=jobs.ListJobsRequest.serialize, - response_deserializer=jobs.ListJobsResponse.deserialize, - ) - return self._stubs['list_jobs'] - - @property - def update_job(self) -> Callable[ - [jobs.UpdateJobRequest], - jobs.Job]: - r"""Return a callable for the update job method over gRPC. - - Updates a job in a project. - - Returns: - Callable[[~.UpdateJobRequest], - ~.Job]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_job' not in self._stubs: - self._stubs['update_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/UpdateJob', - request_serializer=jobs.UpdateJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['update_job'] - - @property - def cancel_job(self) -> Callable[ - [jobs.CancelJobRequest], - jobs.Job]: - r"""Return a callable for the cancel job method over gRPC. - - Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Returns: - Callable[[~.CancelJobRequest], - ~.Job]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_job' not in self._stubs: - self._stubs['cancel_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/CancelJob', - request_serializer=jobs.CancelJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['cancel_job'] - - @property - def delete_job(self) -> Callable[ - [jobs.DeleteJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete job method over gRPC. - - Deletes the job from the project. If the job is active, the - delete fails, and the response returns ``FAILED_PRECONDITION``. - - Returns: - Callable[[~.DeleteJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_job' not in self._stubs: - self._stubs['delete_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/DeleteJob', - request_serializer=jobs.DeleteJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_job'] - - -__all__ = ( - 'JobControllerGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py deleted file mode 100644 index 3c979b2c..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py +++ /dev/null @@ -1,438 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.dataproc_v1.types import jobs -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import JobControllerTransport, DEFAULT_CLIENT_INFO -from .grpc import JobControllerGrpcTransport - - -class JobControllerGrpcAsyncIOTransport(JobControllerTransport): - """gRPC AsyncIO backend transport for JobController. - - The JobController provides methods to manage jobs. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. 
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def submit_job(self) -> Callable[ - [jobs.SubmitJobRequest], - Awaitable[jobs.Job]]: - r"""Return a callable for the submit job method over gRPC. - - Submits a job to a cluster. 
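For orientation, the transport properties above are normally exercised through the generated `JobControllerAsyncClient` rather than called directly. A minimal usage sketch follows; the project, region, bucket, and cluster names, the regional endpoint string, and the `submit_wordcount` helper are placeholders and are not part of the generated sources.

```python
# Hedged sketch: submit a PySpark job through the async JobController surface.
# All identifiers below are placeholders; error handling is omitted.
import asyncio

from google.api_core.client_options import ClientOptions
from google.cloud import dataproc_v1


async def submit_wordcount() -> None:
    # Dataproc job RPCs are regional; point the client at the region's endpoint.
    client = dataproc_v1.JobControllerAsyncClient(
        client_options=ClientOptions(
            api_endpoint="us-central1-dataproc.googleapis.com:443"
        )
    )
    job = {
        "placement": {"cluster_name": "my-cluster"},
        "pyspark_job": {"main_python_file_uri": "gs://my-bucket/wordcount.py"},
    }
    # submit_job_as_operation returns a long-running operation whose result is the Job.
    operation = await client.submit_job_as_operation(
        project_id="my-project", region="us-central1", job=job
    )
    finished_job = await operation.result()
    print(finished_job.status.state)


asyncio.run(submit_wordcount())
```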
- - Returns: - Callable[[~.SubmitJobRequest], - Awaitable[~.Job]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'submit_job' not in self._stubs: - self._stubs['submit_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/SubmitJob', - request_serializer=jobs.SubmitJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['submit_job'] - - @property - def submit_job_as_operation(self) -> Callable[ - [jobs.SubmitJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the submit job as operation method over gRPC. - - Submits job to a cluster. - - Returns: - Callable[[~.SubmitJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'submit_job_as_operation' not in self._stubs: - self._stubs['submit_job_as_operation'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/SubmitJobAsOperation', - request_serializer=jobs.SubmitJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['submit_job_as_operation'] - - @property - def get_job(self) -> Callable[ - [jobs.GetJobRequest], - Awaitable[jobs.Job]]: - r"""Return a callable for the get job method over gRPC. - - Gets the resource representation for a job in a - project. - - Returns: - Callable[[~.GetJobRequest], - Awaitable[~.Job]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_job' not in self._stubs: - self._stubs['get_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/GetJob', - request_serializer=jobs.GetJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['get_job'] - - @property - def list_jobs(self) -> Callable[ - [jobs.ListJobsRequest], - Awaitable[jobs.ListJobsResponse]]: - r"""Return a callable for the list jobs method over gRPC. - - Lists regions/{region}/jobs in a project. - - Returns: - Callable[[~.ListJobsRequest], - Awaitable[~.ListJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_jobs' not in self._stubs: - self._stubs['list_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/ListJobs', - request_serializer=jobs.ListJobsRequest.serialize, - response_deserializer=jobs.ListJobsResponse.deserialize, - ) - return self._stubs['list_jobs'] - - @property - def update_job(self) -> Callable[ - [jobs.UpdateJobRequest], - Awaitable[jobs.Job]]: - r"""Return a callable for the update job method over gRPC. - - Updates a job in a project. 
- - Returns: - Callable[[~.UpdateJobRequest], - Awaitable[~.Job]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_job' not in self._stubs: - self._stubs['update_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/UpdateJob', - request_serializer=jobs.UpdateJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['update_job'] - - @property - def cancel_job(self) -> Callable[ - [jobs.CancelJobRequest], - Awaitable[jobs.Job]]: - r"""Return a callable for the cancel job method over gRPC. - - Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Returns: - Callable[[~.CancelJobRequest], - Awaitable[~.Job]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_job' not in self._stubs: - self._stubs['cancel_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/CancelJob', - request_serializer=jobs.CancelJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['cancel_job'] - - @property - def delete_job(self) -> Callable[ - [jobs.DeleteJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete job method over gRPC. - - Deletes the job from the project. If the job is active, the - delete fails, and the response returns ``FAILED_PRECONDITION``. - - Returns: - Callable[[~.DeleteJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_job' not in self._stubs: - self._stubs['delete_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.JobController/DeleteJob', - request_serializer=jobs.DeleteJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_job'] - - -__all__ = ( - 'JobControllerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/__init__.py deleted file mode 100644 index 1dd621e9..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import WorkflowTemplateServiceClient -from .async_client import WorkflowTemplateServiceAsyncClient - -__all__ = ( - 'WorkflowTemplateServiceClient', - 'WorkflowTemplateServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py deleted file mode 100644 index 55f3a1e9..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py +++ /dev/null @@ -1,945 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1.services.workflow_template_service import pagers -from google.cloud.dataproc_v1.types import workflow_templates -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport -from .client import WorkflowTemplateServiceClient - - -class WorkflowTemplateServiceAsyncClient: - """The API interface for managing Workflow Templates in the - Dataproc API. 
- """ - - _client: WorkflowTemplateServiceClient - - DEFAULT_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_MTLS_ENDPOINT - - cluster_path = staticmethod(WorkflowTemplateServiceClient.cluster_path) - parse_cluster_path = staticmethod(WorkflowTemplateServiceClient.parse_cluster_path) - service_path = staticmethod(WorkflowTemplateServiceClient.service_path) - parse_service_path = staticmethod(WorkflowTemplateServiceClient.parse_service_path) - workflow_template_path = staticmethod(WorkflowTemplateServiceClient.workflow_template_path) - parse_workflow_template_path = staticmethod(WorkflowTemplateServiceClient.parse_workflow_template_path) - common_billing_account_path = staticmethod(WorkflowTemplateServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(WorkflowTemplateServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(WorkflowTemplateServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(WorkflowTemplateServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(WorkflowTemplateServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(WorkflowTemplateServiceClient.parse_common_organization_path) - common_project_path = staticmethod(WorkflowTemplateServiceClient.common_project_path) - parse_common_project_path = staticmethod(WorkflowTemplateServiceClient.parse_common_project_path) - common_location_path = staticmethod(WorkflowTemplateServiceClient.common_location_path) - parse_common_location_path = staticmethod(WorkflowTemplateServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - WorkflowTemplateServiceAsyncClient: The constructed client. - """ - return WorkflowTemplateServiceClient.from_service_account_info.__func__(WorkflowTemplateServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - WorkflowTemplateServiceAsyncClient: The constructed client. - """ - return WorkflowTemplateServiceClient.from_service_account_file.__func__(WorkflowTemplateServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> WorkflowTemplateServiceTransport: - """Returns the transport used by the client instance. - - Returns: - WorkflowTemplateServiceTransport: The transport used by the client instance. 
- """ - return self._client.transport - - get_transport_class = functools.partial(type(WorkflowTemplateServiceClient).get_transport_class, type(WorkflowTemplateServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, WorkflowTemplateServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the workflow template service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.WorkflowTemplateServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = WorkflowTemplateServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_workflow_template(self, - request: workflow_templates.CreateWorkflowTemplateRequest = None, - *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Creates new workflow template. - - Args: - request (:class:`google.cloud.dataproc_v1.types.CreateWorkflowTemplateRequest`): - The request object. A request to create a workflow - template. - parent (:class:`str`): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,create``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.create``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - template (:class:`google.cloud.dataproc_v1.types.WorkflowTemplate`): - Required. The Dataproc workflow - template to create. 
- - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.WorkflowTemplate: - A Dataproc workflow template - resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, template]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.CreateWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_workflow_template(self, - request: workflow_templates.GetWorkflowTemplateRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Retrieves the latest workflow template. - Can retrieve previously instantiated template by - specifying optional version parameter. - - Args: - request (:class:`google.cloud.dataproc_v1.types.GetWorkflowTemplateRequest`): - The request object. A request to fetch a workflow - template. - name (:class:`str`): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.get``, the - resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.get``, the - resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.dataproc_v1.types.WorkflowTemplate: - A Dataproc workflow template - resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.GetWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def instantiate_workflow_template(self, - request: workflow_templates.InstantiateWorkflowTemplateRequest = None, - *, - name: str = None, - parameters: Sequence[workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Instantiates a template and begins execution. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Args: - request (:class:`google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest`): - The request object. A request to instantiate a workflow - template. - name (:class:`str`): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. 
- - - For - ``projects.regions.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For - ``projects.locations.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (:class:`Sequence[google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest.ParametersEntry]`): - Optional. Map from parameter names to - values that should be used for those - parameters. Values may not exceed 1000 - characters. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, parameters]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.InstantiateWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - if parameters: - request.parameters.update(parameters) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.instantiate_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates.WorkflowMetadata, - ) - - # Done; return the response. 
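From the caller's side, the long-running semantics described above (an Operation whose metadata is `WorkflowMetadata` and whose response is `Empty`) look roughly like the sketch below; the project, region, template name, and `CLUSTER_NAME` parameter are assumed placeholders, and the template is assumed to declare that parameter.

```python
# Hedged sketch: instantiate a workflow template and wait for the workflow to finish.
import asyncio

from google.api_core.client_options import ClientOptions
from google.cloud import dataproc_v1


async def run_workflow() -> None:
    client = dataproc_v1.WorkflowTemplateServiceAsyncClient(
        client_options=ClientOptions(
            api_endpoint="us-central1-dataproc.googleapis.com:443"
        )
    )
    # Build the fully-qualified template name with the generated path helper.
    name = client.workflow_template_path("my-project", "us-central1", "my-template")
    operation = await client.instantiate_workflow_template(
        name=name,
        parameters={"CLUSTER_NAME": "wordcount-cluster"},
    )
    # The operation result is google.protobuf.Empty; progress is reported via
    # the WorkflowMetadata attached to the operation.
    await operation.result()
    print("workflow finished")


asyncio.run(run_workflow())
```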
- return response - - async def instantiate_inline_workflow_template(self, - request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, - *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], - [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], - [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Args: - request (:class:`google.cloud.dataproc_v1.types.InstantiateInlineWorkflowTemplateRequest`): - The request object. A request to instantiate an inline - workflow template. - parent (:class:`str`): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For - ``projects.regions.workflowTemplates,instantiateinline``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For - ``projects.locations.workflowTemplates.instantiateinline``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - template (:class:`google.cloud.dataproc_v1.types.WorkflowTemplate`): - Required. The workflow template to - instantiate. - - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, template]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.InstantiateInlineWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.instantiate_inline_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates.WorkflowMetadata, - ) - - # Done; return the response. - return response - - async def update_workflow_template(self, - request: workflow_templates.UpdateWorkflowTemplateRequest = None, - *, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Updates (replaces) workflow template. The updated - template must contain version that matches the current - server version. - - Args: - request (:class:`google.cloud.dataproc_v1.types.UpdateWorkflowTemplateRequest`): - The request object. A request to update a workflow - template. - template (:class:`google.cloud.dataproc_v1.types.WorkflowTemplate`): - Required. The updated workflow template. - - The ``template.version`` field must match the current - version. - - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.WorkflowTemplate: - A Dataproc workflow template - resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([template]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.UpdateWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("template.name", request.template.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_workflow_templates(self, - request: workflow_templates.ListWorkflowTemplatesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListWorkflowTemplatesAsyncPager: - r"""Lists workflows that match the specified filter in - the request. - - Args: - request (:class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest`): - The request object. A request to list workflow templates - in a project. - parent (:class:`str`): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,list``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.list``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.services.workflow_template_service.pagers.ListWorkflowTemplatesAsyncPager: - A response to a request to list - workflow templates in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.ListWorkflowTemplatesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_workflow_templates, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListWorkflowTemplatesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_workflow_template(self, - request: workflow_templates.DeleteWorkflowTemplateRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a workflow template. It does not cancel in- - rogress workflows. - - Args: - request (:class:`google.cloud.dataproc_v1.types.DeleteWorkflowTemplateRequest`): - The request object. A request to delete a workflow - template. - Currently started workflows will remain running. - name (:class:`str`): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.delete``, - the resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For - ``projects.locations.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.DeleteWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
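The async pager returned by `list_workflow_templates` resolves additional pages transparently during iteration, so listing every template in a region can be as small as the sketch below; the parent resource name and regional endpoint are placeholders.

```python
# Hedged sketch: iterate every workflow template in a region. The pager fetches
# additional pages automatically while being async-iterated.
import asyncio

from google.api_core.client_options import ClientOptions
from google.cloud import dataproc_v1


async def list_templates() -> None:
    client = dataproc_v1.WorkflowTemplateServiceAsyncClient(
        client_options=ClientOptions(
            api_endpoint="us-central1-dataproc.googleapis.com:443"
        )
    )
    pager = await client.list_workflow_templates(
        parent="projects/my-project/regions/us-central1"
    )
    async for template in pager:
        print(template.name, template.version)


asyncio.run(list_templates())
```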
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "WorkflowTemplateServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/client.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/client.py deleted file mode 100644 index 7713f026..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/client.py +++ /dev/null @@ -1,1103 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1.services.workflow_template_service import pagers -from google.cloud.dataproc_v1.types import workflow_templates -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import WorkflowTemplateServiceGrpcTransport -from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport - - -class WorkflowTemplateServiceClientMeta(type): - """Metaclass for the WorkflowTemplateService client. 
- - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[WorkflowTemplateServiceTransport]] - _transport_registry["grpc"] = WorkflowTemplateServiceGrpcTransport - _transport_registry["grpc_asyncio"] = WorkflowTemplateServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[WorkflowTemplateServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class WorkflowTemplateServiceClient(metaclass=WorkflowTemplateServiceClientMeta): - """The API interface for managing Workflow Templates in the - Dataproc API. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "dataproc.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - WorkflowTemplateServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - WorkflowTemplateServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> WorkflowTemplateServiceTransport: - """Returns the transport used by the client instance. 
- - Returns: - WorkflowTemplateServiceTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def cluster_path(project: str,location: str,cluster: str,) -> str: - """Returns a fully-qualified cluster string.""" - return "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) - - @staticmethod - def parse_cluster_path(path: str) -> Dict[str,str]: - """Parses a cluster path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/clusters/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def service_path(project: str,location: str,service: str,) -> str: - """Returns a fully-qualified service string.""" - return "projects/{project}/locations/{location}/services/{service}".format(project=project, location=location, service=service, ) - - @staticmethod - def parse_service_path(path: str) -> Dict[str,str]: - """Parses a service path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/services/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def workflow_template_path(project: str,region: str,workflow_template: str,) -> str: - """Returns a fully-qualified workflow_template string.""" - return "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format(project=project, region=region, workflow_template=workflow_template, ) - - @staticmethod - def parse_workflow_template_path(path: str) -> Dict[str,str]: - """Parses a workflow_template path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/regions/(?P.+?)/workflowTemplates/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified 
location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, WorkflowTemplateServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the workflow template service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, WorkflowTemplateServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. 
- if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, WorkflowTemplateServiceTransport): - # transport is a WorkflowTemplateServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_workflow_template(self, - request: workflow_templates.CreateWorkflowTemplateRequest = None, - *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Creates new workflow template. - - Args: - request (google.cloud.dataproc_v1.types.CreateWorkflowTemplateRequest): - The request object. A request to create a workflow - template. - parent (str): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,create``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.create``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - template (google.cloud.dataproc_v1.types.WorkflowTemplate): - Required. The Dataproc workflow - template to create. - - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.WorkflowTemplate: - A Dataproc workflow template - resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, template]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.CreateWorkflowTemplateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.CreateWorkflowTemplateRequest): - request = workflow_templates.CreateWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_workflow_template(self, - request: workflow_templates.GetWorkflowTemplateRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Retrieves the latest workflow template. - Can retrieve previously instantiated template by - specifying optional version parameter. - - Args: - request (google.cloud.dataproc_v1.types.GetWorkflowTemplateRequest): - The request object. A request to fetch a workflow - template. - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.get``, the - resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.get``, the - resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.WorkflowTemplate: - A Dataproc workflow template - resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.GetWorkflowTemplateRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.GetWorkflowTemplateRequest): - request = workflow_templates.GetWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def instantiate_workflow_template(self, - request: workflow_templates.InstantiateWorkflowTemplateRequest = None, - *, - name: str = None, - parameters: Sequence[workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Instantiates a template and begins execution. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Args: - request (google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest): - The request object. A request to instantiate a workflow - template. - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For - ``projects.regions.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For - ``projects.locations.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (Sequence[google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): - Optional. Map from parameter names to - values that should be used for those - parameters. Values may not exceed 1000 - characters. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, parameters]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.InstantiateWorkflowTemplateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.InstantiateWorkflowTemplateRequest): - request = workflow_templates.InstantiateWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if parameters is not None: - request.parameters = parameters - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.instantiate_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates.WorkflowMetadata, - ) - - # Done; return the response. - return response - - def instantiate_inline_workflow_template(self, - request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, - *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], - [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], - [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. 
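To illustrate the flattened-argument surface of the methods above, here is a hypothetical create-and-instantiate sequence; every identifier and field value is made up, and the template is trimmed to the minimum needed to show the call shape rather than a complete workflow definition:

    from google.cloud import dataproc_v1

    client = dataproc_v1.WorkflowTemplateServiceClient()
    parent = "projects/my-project/regions/us-central1"

    template = dataproc_v1.WorkflowTemplate(
        id="sample-template",
        placement=dataproc_v1.WorkflowTemplatePlacement(
            managed_cluster=dataproc_v1.ManagedCluster(cluster_name="wf-managed-cluster"),
        ),
        jobs=[
            dataproc_v1.OrderedJob(
                step_id="example-step",
                hadoop_job=dataproc_v1.HadoopJob(main_class="org.example.MyDriver"),
            ),
        ],
    )

    created = client.create_workflow_template(parent=parent, template=template)

    # instantiate_workflow_template returns a google.api_core.operation.Operation;
    # result() blocks until the workflow (and any managed cluster) has finished.
    operation = client.instantiate_workflow_template(name=created.name)
    operation.result()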
- - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Args: - request (google.cloud.dataproc_v1.types.InstantiateInlineWorkflowTemplateRequest): - The request object. A request to instantiate an inline - workflow template. - parent (str): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For - ``projects.regions.workflowTemplates,instantiateinline``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For - ``projects.locations.workflowTemplates.instantiateinline``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - template (google.cloud.dataproc_v1.types.WorkflowTemplate): - Required. The workflow template to - instantiate. - - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, template]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.InstantiateInlineWorkflowTemplateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.InstantiateInlineWorkflowTemplateRequest): - request = workflow_templates.InstantiateInlineWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.instantiate_inline_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates.WorkflowMetadata, - ) - - # Done; return the response. - return response - - def update_workflow_template(self, - request: workflow_templates.UpdateWorkflowTemplateRequest = None, - *, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Updates (replaces) workflow template. The updated - template must contain version that matches the current - server version. - - Args: - request (google.cloud.dataproc_v1.types.UpdateWorkflowTemplateRequest): - The request object. A request to update a workflow - template. - template (google.cloud.dataproc_v1.types.WorkflowTemplate): - Required. The updated workflow template. - - The ``template.version`` field must match the current - version. - - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.types.WorkflowTemplate: - A Dataproc workflow template - resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([template]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.UpdateWorkflowTemplateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.UpdateWorkflowTemplateRequest): - request = workflow_templates.UpdateWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("template.name", request.template.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_workflow_templates(self, - request: workflow_templates.ListWorkflowTemplatesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListWorkflowTemplatesPager: - r"""Lists workflows that match the specified filter in - the request. - - Args: - request (google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest): - The request object. A request to list workflow templates - in a project. - parent (str): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,list``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.list``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1.services.workflow_template_service.pagers.ListWorkflowTemplatesPager: - A response to a request to list - workflow templates in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.ListWorkflowTemplatesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.ListWorkflowTemplatesRequest): - request = workflow_templates.ListWorkflowTemplatesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_workflow_templates] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListWorkflowTemplatesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def delete_workflow_template(self, - request: workflow_templates.DeleteWorkflowTemplateRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a workflow template. It does not cancel in- - progress workflows. - - Args: - request (google.cloud.dataproc_v1.types.DeleteWorkflowTemplateRequest): - The request object. A request to delete a workflow - template. - Currently started workflows will remain running. - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.delete``, - the resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For - ``projects.locations.workflowTemplates.delete``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.DeleteWorkflowTemplateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.DeleteWorkflowTemplateRequest): - request = workflow_templates.DeleteWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request.
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "WorkflowTemplateServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py deleted file mode 100644 index 2da3d2c9..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.dataproc_v1.types import workflow_templates - - -class ListWorkflowTemplatesPager: - """A pager for iterating through ``list_workflow_templates`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``templates`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListWorkflowTemplates`` requests and continue to iterate - through the ``templates`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., workflow_templates.ListWorkflowTemplatesResponse], - request: workflow_templates.ListWorkflowTemplatesRequest, - response: workflow_templates.ListWorkflowTemplatesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest): - The initial request object. - response (google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = workflow_templates.ListWorkflowTemplatesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[workflow_templates.ListWorkflowTemplatesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[workflow_templates.WorkflowTemplate]: - for page in self.pages: - yield from page.templates - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListWorkflowTemplatesAsyncPager: - """A pager for iterating through ``list_workflow_templates`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``templates`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListWorkflowTemplates`` requests and continue to iterate - through the ``templates`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[workflow_templates.ListWorkflowTemplatesResponse]], - request: workflow_templates.ListWorkflowTemplatesRequest, - response: workflow_templates.ListWorkflowTemplatesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest): - The initial request object. - response (google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = workflow_templates.ListWorkflowTemplatesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[workflow_templates.ListWorkflowTemplatesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[workflow_templates.WorkflowTemplate]: - async def async_generator(): - async for page in self.pages: - for response in page.templates: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py deleted file mode 100644 index 96efd4cb..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import WorkflowTemplateServiceTransport -from .grpc import WorkflowTemplateServiceGrpcTransport -from .grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[WorkflowTemplateServiceTransport]] -_transport_registry['grpc'] = WorkflowTemplateServiceGrpcTransport -_transport_registry['grpc_asyncio'] = WorkflowTemplateServiceGrpcAsyncIOTransport - -__all__ = ( - 'WorkflowTemplateServiceTransport', - 'WorkflowTemplateServiceGrpcTransport', - 'WorkflowTemplateServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py deleted file mode 100644 index fef8a855..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py +++ /dev/null @@ -1,306 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1.types import workflow_templates -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-dataproc', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class WorkflowTemplateServiceTransport(abc.ABC): - """Abstract transport class for WorkflowTemplateService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'dataproc.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_workflow_template: gapic_v1.method.wrap_method( - self.create_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.get_workflow_template: gapic_v1.method.wrap_method( - self.get_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.instantiate_workflow_template: gapic_v1.method.wrap_method( - self.instantiate_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.instantiate_inline_workflow_template: gapic_v1.method.wrap_method( - self.instantiate_inline_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.update_workflow_template: gapic_v1.method.wrap_method( - self.update_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.list_workflow_templates: 
gapic_v1.method.wrap_method( - self.list_workflow_templates, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.delete_workflow_template: gapic_v1.method.wrap_method( - self.delete_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_workflow_template(self) -> Callable[ - [workflow_templates.CreateWorkflowTemplateRequest], - Union[ - workflow_templates.WorkflowTemplate, - Awaitable[workflow_templates.WorkflowTemplate] - ]]: - raise NotImplementedError() - - @property - def get_workflow_template(self) -> Callable[ - [workflow_templates.GetWorkflowTemplateRequest], - Union[ - workflow_templates.WorkflowTemplate, - Awaitable[workflow_templates.WorkflowTemplate] - ]]: - raise NotImplementedError() - - @property - def instantiate_workflow_template(self) -> Callable[ - [workflow_templates.InstantiateWorkflowTemplateRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def instantiate_inline_workflow_template(self) -> Callable[ - [workflow_templates.InstantiateInlineWorkflowTemplateRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def update_workflow_template(self) -> Callable[ - [workflow_templates.UpdateWorkflowTemplateRequest], - Union[ - workflow_templates.WorkflowTemplate, - Awaitable[workflow_templates.WorkflowTemplate] - ]]: - raise NotImplementedError() - - @property - def list_workflow_templates(self) -> Callable[ - [workflow_templates.ListWorkflowTemplatesRequest], - Union[ - workflow_templates.ListWorkflowTemplatesResponse, - Awaitable[workflow_templates.ListWorkflowTemplatesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_workflow_template(self) -> Callable[ - [workflow_templates.DeleteWorkflowTemplateRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'WorkflowTemplateServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py deleted file mode 100644 index c4f896aa..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py +++ /dev/null @@ -1,481 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
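The wrapped methods above attach the default retry policies (exponential backoff on ``ServiceUnavailable``, plus ``DeadlineExceeded`` and ``InternalServerError`` for the read-only get/list calls) with a 600-second deadline and timeout. A caller can override these per invocation; the values below are illustrative only and reuse the hypothetical ``client`` and ``name`` from the earlier sketches:

    from google.api_core import retry as retries

    template = client.get_workflow_template(
        name=name,
        retry=retries.Retry(initial=0.1, maximum=10.0, multiplier=1.3, deadline=120.0),
        timeout=120.0,
    )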
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.dataproc_v1.types import workflow_templates -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO - - -class WorkflowTemplateServiceGrpcTransport(WorkflowTemplateServiceTransport): - """gRPC backend transport for WorkflowTemplateService. - - The API interface for managing Workflow Templates in the - Dataproc API. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. 
It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. 
- Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_workflow_template(self) -> Callable[ - [workflow_templates.CreateWorkflowTemplateRequest], - workflow_templates.WorkflowTemplate]: - r"""Return a callable for the create workflow template method over gRPC. - - Creates new workflow template. - - Returns: - Callable[[~.CreateWorkflowTemplateRequest], - ~.WorkflowTemplate]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_workflow_template' not in self._stubs: - self._stubs['create_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/CreateWorkflowTemplate', - request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['create_workflow_template'] - - @property - def get_workflow_template(self) -> Callable[ - [workflow_templates.GetWorkflowTemplateRequest], - workflow_templates.WorkflowTemplate]: - r"""Return a callable for the get workflow template method over gRPC. - - Retrieves the latest workflow template. - Can retrieve previously instantiated template by - specifying optional version parameter. - - Returns: - Callable[[~.GetWorkflowTemplateRequest], - ~.WorkflowTemplate]: - A function that, when called, will call the underlying RPC - on the server. 
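Because the transport accepts a pre-built ``grpc.Channel`` (in which case its own credentials handling is skipped), callers with special networking requirements can wire the channel, transport, and client together themselves. A hypothetical wiring using only the classes shown above; the host value is illustrative:

    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.services.workflow_template_service.transports import (
        WorkflowTemplateServiceGrpcTransport,
    )

    # create_channel() applies the cloud-platform scope and default host from the base transport.
    channel = WorkflowTemplateServiceGrpcTransport.create_channel(
        "us-central1-dataproc.googleapis.com:443",
    )
    transport = WorkflowTemplateServiceGrpcTransport(channel=channel)
    client = dataproc_v1.WorkflowTemplateServiceClient(transport=transport)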
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_workflow_template' not in self._stubs: - self._stubs['get_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/GetWorkflowTemplate', - request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['get_workflow_template'] - - @property - def instantiate_workflow_template(self) -> Callable[ - [workflow_templates.InstantiateWorkflowTemplateRequest], - operations_pb2.Operation]: - r"""Return a callable for the instantiate workflow template method over gRPC. - - Instantiates a template and begins execution. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Returns: - Callable[[~.InstantiateWorkflowTemplateRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'instantiate_workflow_template' not in self._stubs: - self._stubs['instantiate_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateWorkflowTemplate', - request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['instantiate_workflow_template'] - - @property - def instantiate_inline_workflow_template(self) -> Callable[ - [workflow_templates.InstantiateInlineWorkflowTemplateRequest], - operations_pb2.Operation]: - r"""Return a callable for the instantiate inline workflow - template method over gRPC. - - Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], - [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], - [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. 
- - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Returns: - Callable[[~.InstantiateInlineWorkflowTemplateRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'instantiate_inline_workflow_template' not in self._stubs: - self._stubs['instantiate_inline_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateInlineWorkflowTemplate', - request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['instantiate_inline_workflow_template'] - - @property - def update_workflow_template(self) -> Callable[ - [workflow_templates.UpdateWorkflowTemplateRequest], - workflow_templates.WorkflowTemplate]: - r"""Return a callable for the update workflow template method over gRPC. - - Updates (replaces) workflow template. The updated - template must contain version that matches the current - server version. - - Returns: - Callable[[~.UpdateWorkflowTemplateRequest], - ~.WorkflowTemplate]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_workflow_template' not in self._stubs: - self._stubs['update_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/UpdateWorkflowTemplate', - request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['update_workflow_template'] - - @property - def list_workflow_templates(self) -> Callable[ - [workflow_templates.ListWorkflowTemplatesRequest], - workflow_templates.ListWorkflowTemplatesResponse]: - r"""Return a callable for the list workflow templates method over gRPC. - - Lists workflows that match the specified filter in - the request. - - Returns: - Callable[[~.ListWorkflowTemplatesRequest], - ~.ListWorkflowTemplatesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_workflow_templates' not in self._stubs: - self._stubs['list_workflow_templates'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/ListWorkflowTemplates', - request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize, - response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize, - ) - return self._stubs['list_workflow_templates'] - - @property - def delete_workflow_template(self) -> Callable[ - [workflow_templates.DeleteWorkflowTemplateRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete workflow template method over gRPC. 
- - Deletes a workflow template. It does not cancel in- - progress workflows. - - Returns: - Callable[[~.DeleteWorkflowTemplateRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_workflow_template' not in self._stubs: - self._stubs['delete_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/DeleteWorkflowTemplate', - request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_workflow_template'] - - -__all__ = ( - 'WorkflowTemplateServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py deleted file mode 100644 index 6783c33d..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,485 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.dataproc_v1.types import workflow_templates -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import WorkflowTemplateServiceGrpcTransport - - -class WorkflowTemplateServiceGrpcAsyncIOTransport(WorkflowTemplateServiceTransport): - """gRPC AsyncIO backend transport for WorkflowTemplateService. - - The API interface for managing Workflow Templates in the - Dataproc API. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed.
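This asynchronous transport is what the generated WorkflowTemplateServiceAsyncClient uses under the hood. A minimal sketch of the async surface (placeholder names; assumes an asyncio event loop and application default credentials)::

    import asyncio

    from google.cloud import dataproc_v1

    async def show_template():
        # Placeholder resource name; regional endpoint assumed.
        client = dataproc_v1.WorkflowTemplateServiceAsyncClient(
            client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
        )
        name = "projects/my-project/regions/us-central1/workflowTemplates/my-template"
        template = await client.get_workflow_template(name=name)
        print(template.name)

    asyncio.run(show_template())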
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. 
A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. 
- return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_workflow_template(self) -> Callable[ - [workflow_templates.CreateWorkflowTemplateRequest], - Awaitable[workflow_templates.WorkflowTemplate]]: - r"""Return a callable for the create workflow template method over gRPC. - - Creates new workflow template. - - Returns: - Callable[[~.CreateWorkflowTemplateRequest], - Awaitable[~.WorkflowTemplate]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_workflow_template' not in self._stubs: - self._stubs['create_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/CreateWorkflowTemplate', - request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['create_workflow_template'] - - @property - def get_workflow_template(self) -> Callable[ - [workflow_templates.GetWorkflowTemplateRequest], - Awaitable[workflow_templates.WorkflowTemplate]]: - r"""Return a callable for the get workflow template method over gRPC. - - Retrieves the latest workflow template. - Can retrieve previously instantiated template by - specifying optional version parameter. - - Returns: - Callable[[~.GetWorkflowTemplateRequest], - Awaitable[~.WorkflowTemplate]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_workflow_template' not in self._stubs: - self._stubs['get_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/GetWorkflowTemplate', - request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['get_workflow_template'] - - @property - def instantiate_workflow_template(self) -> Callable[ - [workflow_templates.InstantiateWorkflowTemplateRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the instantiate workflow template method over gRPC. - - Instantiates a template and begins execution. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. 
- Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Returns: - Callable[[~.InstantiateWorkflowTemplateRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'instantiate_workflow_template' not in self._stubs: - self._stubs['instantiate_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateWorkflowTemplate', - request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['instantiate_workflow_template'] - - @property - def instantiate_inline_workflow_template(self) -> Callable[ - [workflow_templates.InstantiateInlineWorkflowTemplateRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the instantiate inline workflow - template method over gRPC. - - Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], - [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], - [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Returns: - Callable[[~.InstantiateInlineWorkflowTemplateRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'instantiate_inline_workflow_template' not in self._stubs: - self._stubs['instantiate_inline_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateInlineWorkflowTemplate', - request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['instantiate_inline_workflow_template'] - - @property - def update_workflow_template(self) -> Callable[ - [workflow_templates.UpdateWorkflowTemplateRequest], - Awaitable[workflow_templates.WorkflowTemplate]]: - r"""Return a callable for the update workflow template method over gRPC. - - Updates (replaces) workflow template. 
The updated - template must contain version that matches the current - server version. - - Returns: - Callable[[~.UpdateWorkflowTemplateRequest], - Awaitable[~.WorkflowTemplate]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_workflow_template' not in self._stubs: - self._stubs['update_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/UpdateWorkflowTemplate', - request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['update_workflow_template'] - - @property - def list_workflow_templates(self) -> Callable[ - [workflow_templates.ListWorkflowTemplatesRequest], - Awaitable[workflow_templates.ListWorkflowTemplatesResponse]]: - r"""Return a callable for the list workflow templates method over gRPC. - - Lists workflows that match the specified filter in - the request. - - Returns: - Callable[[~.ListWorkflowTemplatesRequest], - Awaitable[~.ListWorkflowTemplatesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_workflow_templates' not in self._stubs: - self._stubs['list_workflow_templates'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/ListWorkflowTemplates', - request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize, - response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize, - ) - return self._stubs['list_workflow_templates'] - - @property - def delete_workflow_template(self) -> Callable[ - [workflow_templates.DeleteWorkflowTemplateRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete workflow template method over gRPC. - - Deletes a workflow template. It does not cancel in- - progress workflows. - - Returns: - Callable[[~.DeleteWorkflowTemplateRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each.
- if 'delete_workflow_template' not in self._stubs: - self._stubs['delete_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1.WorkflowTemplateService/DeleteWorkflowTemplate', - request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_workflow_template'] - - -__all__ = ( - 'WorkflowTemplateServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/__init__.py deleted file mode 100644 index 92d3cd45..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/__init__.py +++ /dev/null @@ -1,209 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .autoscaling_policies import ( - AutoscalingPolicy, - BasicAutoscalingAlgorithm, - BasicYarnAutoscalingConfig, - CreateAutoscalingPolicyRequest, - DeleteAutoscalingPolicyRequest, - GetAutoscalingPolicyRequest, - InstanceGroupAutoscalingPolicyConfig, - ListAutoscalingPoliciesRequest, - ListAutoscalingPoliciesResponse, - UpdateAutoscalingPolicyRequest, -) -from .clusters import ( - AcceleratorConfig, - AutoscalingConfig, - Cluster, - ClusterConfig, - ClusterMetrics, - ClusterStatus, - CreateClusterRequest, - DeleteClusterRequest, - DiagnoseClusterRequest, - DiagnoseClusterResults, - DiskConfig, - EncryptionConfig, - EndpointConfig, - GceClusterConfig, - GetClusterRequest, - GkeClusterConfig, - IdentityConfig, - InstanceGroupConfig, - KerberosConfig, - LifecycleConfig, - ListClustersRequest, - ListClustersResponse, - ManagedGroupConfig, - MetastoreConfig, - NodeGroupAffinity, - NodeInitializationAction, - ReservationAffinity, - SecurityConfig, - ShieldedInstanceConfig, - SoftwareConfig, - StartClusterRequest, - StopClusterRequest, - UpdateClusterRequest, -) -from .jobs import ( - CancelJobRequest, - DeleteJobRequest, - GetJobRequest, - HadoopJob, - HiveJob, - Job, - JobMetadata, - JobPlacement, - JobReference, - JobScheduling, - JobStatus, - ListJobsRequest, - ListJobsResponse, - LoggingConfig, - PigJob, - PrestoJob, - PySparkJob, - QueryList, - SparkJob, - SparkRJob, - SparkSqlJob, - SubmitJobRequest, - UpdateJobRequest, - YarnApplication, -) -from .operations import ( - ClusterOperationMetadata, - ClusterOperationStatus, -) -from .workflow_templates import ( - ClusterOperation, - ClusterSelector, - CreateWorkflowTemplateRequest, - DeleteWorkflowTemplateRequest, - GetWorkflowTemplateRequest, - InstantiateInlineWorkflowTemplateRequest, - InstantiateWorkflowTemplateRequest, - ListWorkflowTemplatesRequest, - ListWorkflowTemplatesResponse, - ManagedCluster, - OrderedJob, - ParameterValidation, - RegexValidation, - TemplateParameter, - UpdateWorkflowTemplateRequest, - ValueValidation, - WorkflowGraph, - WorkflowMetadata, - WorkflowNode, - WorkflowTemplate, - WorkflowTemplatePlacement, -) - -__all__ = ( - 'AutoscalingPolicy', - 'BasicAutoscalingAlgorithm', - 
'BasicYarnAutoscalingConfig', - 'CreateAutoscalingPolicyRequest', - 'DeleteAutoscalingPolicyRequest', - 'GetAutoscalingPolicyRequest', - 'InstanceGroupAutoscalingPolicyConfig', - 'ListAutoscalingPoliciesRequest', - 'ListAutoscalingPoliciesResponse', - 'UpdateAutoscalingPolicyRequest', - 'AcceleratorConfig', - 'AutoscalingConfig', - 'Cluster', - 'ClusterConfig', - 'ClusterMetrics', - 'ClusterStatus', - 'CreateClusterRequest', - 'DeleteClusterRequest', - 'DiagnoseClusterRequest', - 'DiagnoseClusterResults', - 'DiskConfig', - 'EncryptionConfig', - 'EndpointConfig', - 'GceClusterConfig', - 'GetClusterRequest', - 'GkeClusterConfig', - 'IdentityConfig', - 'InstanceGroupConfig', - 'KerberosConfig', - 'LifecycleConfig', - 'ListClustersRequest', - 'ListClustersResponse', - 'ManagedGroupConfig', - 'MetastoreConfig', - 'NodeGroupAffinity', - 'NodeInitializationAction', - 'ReservationAffinity', - 'SecurityConfig', - 'ShieldedInstanceConfig', - 'SoftwareConfig', - 'StartClusterRequest', - 'StopClusterRequest', - 'UpdateClusterRequest', - 'CancelJobRequest', - 'DeleteJobRequest', - 'GetJobRequest', - 'HadoopJob', - 'HiveJob', - 'Job', - 'JobMetadata', - 'JobPlacement', - 'JobReference', - 'JobScheduling', - 'JobStatus', - 'ListJobsRequest', - 'ListJobsResponse', - 'LoggingConfig', - 'PigJob', - 'PrestoJob', - 'PySparkJob', - 'QueryList', - 'SparkJob', - 'SparkRJob', - 'SparkSqlJob', - 'SubmitJobRequest', - 'UpdateJobRequest', - 'YarnApplication', - 'ClusterOperationMetadata', - 'ClusterOperationStatus', - 'Component', - 'ClusterOperation', - 'ClusterSelector', - 'CreateWorkflowTemplateRequest', - 'DeleteWorkflowTemplateRequest', - 'GetWorkflowTemplateRequest', - 'InstantiateInlineWorkflowTemplateRequest', - 'InstantiateWorkflowTemplateRequest', - 'ListWorkflowTemplatesRequest', - 'ListWorkflowTemplatesResponse', - 'ManagedCluster', - 'OrderedJob', - 'ParameterValidation', - 'RegexValidation', - 'TemplateParameter', - 'UpdateWorkflowTemplateRequest', - 'ValueValidation', - 'WorkflowGraph', - 'WorkflowMetadata', - 'WorkflowNode', - 'WorkflowTemplate', - 'WorkflowTemplatePlacement', -) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/autoscaling_policies.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/autoscaling_policies.py deleted file mode 100644 index 2d9f8651..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/autoscaling_policies.py +++ /dev/null @@ -1,416 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
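The re-exports in ``types/__init__.py`` above mean every request and resource message is also available from ``google.cloud.dataproc_v1`` directly, so callers rarely import the ``types`` package itself. A small sketch with placeholder values::

    from google.cloud import dataproc_v1

    # Build a request message directly from the re-exported types.
    request = dataproc_v1.ListWorkflowTemplatesRequest(
        parent="projects/my-project/regions/us-central1",
        page_size=50,
    )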
-# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1', - manifest={ - 'AutoscalingPolicy', - 'BasicAutoscalingAlgorithm', - 'BasicYarnAutoscalingConfig', - 'InstanceGroupAutoscalingPolicyConfig', - 'CreateAutoscalingPolicyRequest', - 'GetAutoscalingPolicyRequest', - 'UpdateAutoscalingPolicyRequest', - 'DeleteAutoscalingPolicyRequest', - 'ListAutoscalingPoliciesRequest', - 'ListAutoscalingPoliciesResponse', - }, -) - - -class AutoscalingPolicy(proto.Message): - r"""Describes an autoscaling policy for Dataproc cluster - autoscaler. - - Attributes: - id (str): - Required. The policy id. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). Cannot begin or end with - underscore or hyphen. Must consist of between 3 and 50 - characters. - name (str): - Output only. The "resource name" of the autoscaling policy, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies``, the - resource name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies``, the - resource name of the policy has the following format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - basic_algorithm (google.cloud.dataproc_v1.types.BasicAutoscalingAlgorithm): - - worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig): - Required. Describes how the autoscaler will - operate for primary workers. - secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig): - Optional. Describes how the autoscaler will - operate for secondary workers. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - name = proto.Field( - proto.STRING, - number=2, - ) - basic_algorithm = proto.Field( - proto.MESSAGE, - number=3, - oneof='algorithm', - message='BasicAutoscalingAlgorithm', - ) - worker_config = proto.Field( - proto.MESSAGE, - number=4, - message='InstanceGroupAutoscalingPolicyConfig', - ) - secondary_worker_config = proto.Field( - proto.MESSAGE, - number=5, - message='InstanceGroupAutoscalingPolicyConfig', - ) - - -class BasicAutoscalingAlgorithm(proto.Message): - r"""Basic algorithm for autoscaling. - Attributes: - yarn_config (google.cloud.dataproc_v1.types.BasicYarnAutoscalingConfig): - Required. YARN autoscaling configuration. - cooldown_period (google.protobuf.duration_pb2.Duration): - Optional. Duration between scaling events. A scaling period - starts after the update operation from the previous event - has completed. - - Bounds: [2m, 1d]. Default: 2m. - """ - - yarn_config = proto.Field( - proto.MESSAGE, - number=1, - message='BasicYarnAutoscalingConfig', - ) - cooldown_period = proto.Field( - proto.MESSAGE, - number=2, - message=duration_pb2.Duration, - ) - - -class BasicYarnAutoscalingConfig(proto.Message): - r"""Basic autoscaling configurations for YARN. - Attributes: - graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): - Required. Timeout for YARN graceful decommissioning of Node - Managers. Specifies the duration to wait for jobs to - complete before forcefully removing workers (and potentially - interrupting jobs). Only applicable to downscaling - operations. - - Bounds: [0s, 1d]. - scale_up_factor (float): - Required. 
Fraction of average YARN pending memory in the - last cooldown period for which to add workers. A scale-up - factor of 1.0 will result in scaling up so that there is no - pending memory remaining after the update (more aggressive - scaling). A scale-up factor closer to 0 will result in a - smaller magnitude of scaling up (less aggressive scaling). - See `How autoscaling - works `__ - for more information. - - Bounds: [0.0, 1.0]. - scale_down_factor (float): - Required. Fraction of average YARN pending memory in the - last cooldown period for which to remove workers. A - scale-down factor of 1 will result in scaling down so that - there is no available memory remaining after the update - (more aggressive scaling). A scale-down factor of 0 disables - removing workers, which can be beneficial for autoscaling a - single job. See `How autoscaling - works `__ - for more information. - - Bounds: [0.0, 1.0]. - scale_up_min_worker_fraction (float): - Optional. Minimum scale-up threshold as a fraction of total - cluster size before scaling occurs. For example, in a - 20-worker cluster, a threshold of 0.1 means the autoscaler - must recommend at least a 2-worker scale-up for the cluster - to scale. A threshold of 0 means the autoscaler will scale - up on any recommended change. - - Bounds: [0.0, 1.0]. Default: 0.0. - scale_down_min_worker_fraction (float): - Optional. Minimum scale-down threshold as a fraction of - total cluster size before scaling occurs. For example, in a - 20-worker cluster, a threshold of 0.1 means the autoscaler - must recommend at least a 2 worker scale-down for the - cluster to scale. A threshold of 0 means the autoscaler will - scale down on any recommended change. - - Bounds: [0.0, 1.0]. Default: 0.0. - """ - - graceful_decommission_timeout = proto.Field( - proto.MESSAGE, - number=5, - message=duration_pb2.Duration, - ) - scale_up_factor = proto.Field( - proto.DOUBLE, - number=1, - ) - scale_down_factor = proto.Field( - proto.DOUBLE, - number=2, - ) - scale_up_min_worker_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - scale_down_min_worker_fraction = proto.Field( - proto.DOUBLE, - number=4, - ) - - -class InstanceGroupAutoscalingPolicyConfig(proto.Message): - r"""Configuration for the size bounds of an instance group, - including its proportional size to other groups. - - Attributes: - min_instances (int): - Optional. Minimum number of instances for this group. - - Primary workers - Bounds: [2, max_instances]. Default: 2. - Secondary workers - Bounds: [0, max_instances]. Default: 0. - max_instances (int): - Required. Maximum number of instances for this group. - Required for primary workers. Note that by default, clusters - will not use secondary workers. Required for secondary - workers if the minimum secondary instances is set. - - Primary workers - Bounds: [min_instances, ). Secondary - workers - Bounds: [min_instances, ). Default: 0. - weight (int): - Optional. Weight for the instance group, which is used to - determine the fraction of total workers in the cluster from - this instance group. For example, if primary workers have - weight 2, and secondary workers have weight 1, the cluster - will have approximately 2 primary workers for each secondary - worker. - - The cluster may not reach the specified balance if - constrained by min/max bounds or other autoscaling settings. - For example, if ``max_instances`` for secondary workers is - 0, then only primary workers will be added. The cluster can - also be out of balance when created. 
- - If weight is not set on any instance group, the cluster will - default to equal weight for all groups: the cluster will - attempt to maintain an equal number of workers in each group - within the configured size bounds for each group. If weight - is set for one group only, the cluster will default to zero - weight on the unset group. For example if weight is set only - on primary workers, the cluster will use primary workers - only and no secondary workers. - """ - - min_instances = proto.Field( - proto.INT32, - number=1, - ) - max_instances = proto.Field( - proto.INT32, - number=2, - ) - weight = proto.Field( - proto.INT32, - number=3, - ) - - -class CreateAutoscalingPolicyRequest(proto.Message): - r"""A request to create an autoscaling policy. - Attributes: - parent (str): - Required. The "resource name" of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.create``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.create``, - the resource name of the location has the following - format: ``projects/{project_id}/locations/{location}`` - policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): - Required. The autoscaling policy to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - policy = proto.Field( - proto.MESSAGE, - number=2, - message='AutoscalingPolicy', - ) - - -class GetAutoscalingPolicyRequest(proto.Message): - r"""A request to fetch an autoscaling policy. - Attributes: - name (str): - Required. The "resource name" of the autoscaling policy, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.get``, the - resource name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.get``, the - resource name of the policy has the following format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class UpdateAutoscalingPolicyRequest(proto.Message): - r"""A request to update an autoscaling policy. - Attributes: - policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): - Required. The updated autoscaling policy. - """ - - policy = proto.Field( - proto.MESSAGE, - number=1, - message='AutoscalingPolicy', - ) - - -class DeleteAutoscalingPolicyRequest(proto.Message): - r"""A request to delete an autoscaling policy. - Autoscaling policies in use by one or more clusters will not be - deleted. - - Attributes: - name (str): - Required. The "resource name" of the autoscaling policy, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.delete``, the - resource name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.delete``, - the resource name of the policy has the following format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListAutoscalingPoliciesRequest(proto.Message): - r"""A request to list autoscaling policies in a project. - Attributes: - parent (str): - Required. 
The "resource name" of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.list``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.list``, the - resource name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - page_size (int): - Optional. The maximum number of results to - return in each response. Must be less than or - equal to 1000. Defaults to 100. - page_token (str): - Optional. The page token, returned by a - previous call, to request the next page of - results. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - - -class ListAutoscalingPoliciesResponse(proto.Message): - r"""A response to a request to list autoscaling policies in a - project. - - Attributes: - policies (Sequence[google.cloud.dataproc_v1.types.AutoscalingPolicy]): - Output only. Autoscaling policies list. - next_page_token (str): - Output only. This token is included in the - response if there are more results to fetch. - """ - - @property - def raw_page(self): - return self - - policies = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='AutoscalingPolicy', - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/clusters.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/clusters.py deleted file mode 100644 index f2395056..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/clusters.py +++ /dev/null @@ -1,1797 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
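The autoscaling policy messages defined above are typically assembled client-side and submitted through the generated AutoscalingPolicyServiceClient. A minimal sketch, with placeholder project and region values and a regional endpoint assumed::

    from google.cloud import dataproc_v1
    from google.protobuf import duration_pb2

    client = dataproc_v1.AutoscalingPolicyServiceClient(
        client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
    )
    policy = dataproc_v1.AutoscalingPolicy(
        id="example-policy",  # placeholder policy id
        basic_algorithm=dataproc_v1.BasicAutoscalingAlgorithm(
            yarn_config=dataproc_v1.BasicYarnAutoscalingConfig(
                graceful_decommission_timeout=duration_pb2.Duration(seconds=600),
                scale_up_factor=0.5,
                scale_down_factor=0.5,
            ),
            cooldown_period=duration_pb2.Duration(seconds=120),
        ),
        worker_config=dataproc_v1.InstanceGroupAutoscalingPolicyConfig(
            max_instances=10,
        ),
    )
    created = client.create_autoscaling_policy(
        parent="projects/my-project/regions/us-central1",
        policy=policy,
    )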
-# -import proto # type: ignore - -from google.cloud.dataproc_v1.types import shared -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1', - manifest={ - 'Cluster', - 'ClusterConfig', - 'GkeClusterConfig', - 'EndpointConfig', - 'AutoscalingConfig', - 'EncryptionConfig', - 'GceClusterConfig', - 'NodeGroupAffinity', - 'ShieldedInstanceConfig', - 'InstanceGroupConfig', - 'ManagedGroupConfig', - 'AcceleratorConfig', - 'DiskConfig', - 'NodeInitializationAction', - 'ClusterStatus', - 'SecurityConfig', - 'KerberosConfig', - 'IdentityConfig', - 'SoftwareConfig', - 'LifecycleConfig', - 'MetastoreConfig', - 'ClusterMetrics', - 'CreateClusterRequest', - 'UpdateClusterRequest', - 'StopClusterRequest', - 'StartClusterRequest', - 'DeleteClusterRequest', - 'GetClusterRequest', - 'ListClustersRequest', - 'ListClustersResponse', - 'DiagnoseClusterRequest', - 'DiagnoseClusterResults', - 'ReservationAffinity', - }, -) - - -class Cluster(proto.Message): - r"""Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - Attributes: - project_id (str): - Required. The Google Cloud Platform project - ID that the cluster belongs to. - cluster_name (str): - Required. The cluster name. Cluster names - within a project must be unique. Names of - deleted clusters can be reused. - config (google.cloud.dataproc_v1.types.ClusterConfig): - Required. The cluster config. Note that - Dataproc may set default values, and values may - change when clusters are updated. - labels (Sequence[google.cloud.dataproc_v1.types.Cluster.LabelsEntry]): - Optional. The labels to associate with this cluster. Label - **keys** must contain 1 to 63 characters, and must conform - to `RFC 1035 `__. - Label **values** may be empty, but, if present, must contain - 1 to 63 characters, and must conform to `RFC - 1035 `__. No more than - 32 labels can be associated with a cluster. - status (google.cloud.dataproc_v1.types.ClusterStatus): - Output only. Cluster status. - status_history (Sequence[google.cloud.dataproc_v1.types.ClusterStatus]): - Output only. The previous cluster status. - cluster_uuid (str): - Output only. A cluster UUID (Unique Universal - Identifier). Dataproc generates this value when - it creates the cluster. - metrics (google.cloud.dataproc_v1.types.ClusterMetrics): - Output only. Contains cluster daemon metrics such as HDFS - and YARN stats. - - **Beta Feature**: This report is available for testing - purposes only. It may be changed before final release. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - config = proto.Field( - proto.MESSAGE, - number=3, - message='ClusterConfig', - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=8, - ) - status = proto.Field( - proto.MESSAGE, - number=4, - message='ClusterStatus', - ) - status_history = proto.RepeatedField( - proto.MESSAGE, - number=7, - message='ClusterStatus', - ) - cluster_uuid = proto.Field( - proto.STRING, - number=6, - ) - metrics = proto.Field( - proto.MESSAGE, - number=9, - message='ClusterMetrics', - ) - - -class ClusterConfig(proto.Message): - r"""The cluster config. - Attributes: - config_bucket (str): - Optional. A Cloud Storage bucket used to stage job - dependencies, config files, and job driver console output. 
- If you do not specify a staging bucket, Cloud Dataproc will - determine a Cloud Storage location (US, ASIA, or EU) for - your cluster's staging bucket according to the Compute - Engine zone where your cluster is deployed, and then create - and manage this project-level, per-location bucket (see - `Dataproc staging - bucket `__). - **This field requires a Cloud Storage bucket name, not a URI - to a Cloud Storage bucket.** - temp_bucket (str): - Optional. A Cloud Storage bucket used to store ephemeral - cluster and jobs data, such as Spark and MapReduce history - files. If you do not specify a temp bucket, Dataproc will - determine a Cloud Storage location (US, ASIA, or EU) for - your cluster's temp bucket according to the Compute Engine - zone where your cluster is deployed, and then create and - manage this project-level, per-location bucket. The default - bucket has a TTL of 90 days, but you can use any TTL (or - none) if you specify a bucket. **This field requires a Cloud - Storage bucket name, not a URI to a Cloud Storage bucket.** - gce_cluster_config (google.cloud.dataproc_v1.types.GceClusterConfig): - Optional. The shared Compute Engine config - settings for all instances in a cluster. - master_config (google.cloud.dataproc_v1.types.InstanceGroupConfig): - Optional. The Compute Engine config settings - for the master instance in a cluster. - worker_config (google.cloud.dataproc_v1.types.InstanceGroupConfig): - Optional. The Compute Engine config settings - for worker instances in a cluster. - secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupConfig): - Optional. The Compute Engine config settings - for additional worker instances in a cluster. - software_config (google.cloud.dataproc_v1.types.SoftwareConfig): - Optional. The config settings for software - inside the cluster. - initialization_actions (Sequence[google.cloud.dataproc_v1.types.NodeInitializationAction]): - Optional. Commands to execute on each node after config is - completed. By default, executables are run on master and all - worker nodes. You can test a node's ``role`` metadata to run - an executable on a master or worker node, as shown below - using ``curl`` (you can also use ``wget``): - - :: - - ROLE=$(curl -H Metadata-Flavor:Google - http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) - if [[ "${ROLE}" == 'Master' ]]; then - ... master specific actions ... - else - ... worker specific actions ... - fi - encryption_config (google.cloud.dataproc_v1.types.EncryptionConfig): - Optional. Encryption settings for the - cluster. - autoscaling_config (google.cloud.dataproc_v1.types.AutoscalingConfig): - Optional. Autoscaling config for the policy - associated with the cluster. Cluster does not - autoscale if this field is unset. - security_config (google.cloud.dataproc_v1.types.SecurityConfig): - Optional. Security settings for the cluster. - lifecycle_config (google.cloud.dataproc_v1.types.LifecycleConfig): - Optional. Lifecycle setting for the cluster. - endpoint_config (google.cloud.dataproc_v1.types.EndpointConfig): - Optional. Port/endpoint configuration for - this cluster - metastore_config (google.cloud.dataproc_v1.types.MetastoreConfig): - Optional. Metastore configuration. - gke_cluster_config (google.cloud.dataproc_v1.types.GkeClusterConfig): - Optional. BETA. The Kubernetes Engine config for Dataproc - clusters deployed to Kubernetes. 
Setting this is considered - mutually exclusive with Compute Engine-based options such as - ``gce_cluster_config``, ``master_config``, - ``worker_config``, ``secondary_worker_config``, and - ``autoscaling_config``. - """ - - config_bucket = proto.Field( - proto.STRING, - number=1, - ) - temp_bucket = proto.Field( - proto.STRING, - number=2, - ) - gce_cluster_config = proto.Field( - proto.MESSAGE, - number=8, - message='GceClusterConfig', - ) - master_config = proto.Field( - proto.MESSAGE, - number=9, - message='InstanceGroupConfig', - ) - worker_config = proto.Field( - proto.MESSAGE, - number=10, - message='InstanceGroupConfig', - ) - secondary_worker_config = proto.Field( - proto.MESSAGE, - number=12, - message='InstanceGroupConfig', - ) - software_config = proto.Field( - proto.MESSAGE, - number=13, - message='SoftwareConfig', - ) - initialization_actions = proto.RepeatedField( - proto.MESSAGE, - number=11, - message='NodeInitializationAction', - ) - encryption_config = proto.Field( - proto.MESSAGE, - number=15, - message='EncryptionConfig', - ) - autoscaling_config = proto.Field( - proto.MESSAGE, - number=18, - message='AutoscalingConfig', - ) - security_config = proto.Field( - proto.MESSAGE, - number=16, - message='SecurityConfig', - ) - lifecycle_config = proto.Field( - proto.MESSAGE, - number=17, - message='LifecycleConfig', - ) - endpoint_config = proto.Field( - proto.MESSAGE, - number=19, - message='EndpointConfig', - ) - metastore_config = proto.Field( - proto.MESSAGE, - number=20, - message='MetastoreConfig', - ) - gke_cluster_config = proto.Field( - proto.MESSAGE, - number=21, - message='GkeClusterConfig', - ) - - -class GkeClusterConfig(proto.Message): - r"""The GKE config for this cluster. - Attributes: - namespaced_gke_deployment_target (google.cloud.dataproc_v1.types.GkeClusterConfig.NamespacedGkeDeploymentTarget): - Optional. A target for the deployment. - """ - - class NamespacedGkeDeploymentTarget(proto.Message): - r"""A full, namespace-isolated deployment target for an existing - GKE cluster. - - Attributes: - target_gke_cluster (str): - Optional. The target GKE cluster to deploy to. Format: - 'projects/{project}/locations/{location}/clusters/{cluster_id}' - cluster_namespace (str): - Optional. A namespace within the GKE cluster - to deploy into. - """ - - target_gke_cluster = proto.Field( - proto.STRING, - number=1, - ) - cluster_namespace = proto.Field( - proto.STRING, - number=2, - ) - - namespaced_gke_deployment_target = proto.Field( - proto.MESSAGE, - number=1, - message=NamespacedGkeDeploymentTarget, - ) - - -class EndpointConfig(proto.Message): - r"""Endpoint config for this cluster - Attributes: - http_ports (Sequence[google.cloud.dataproc_v1.types.EndpointConfig.HttpPortsEntry]): - Output only. The map of port descriptions to URLs. Will only - be populated if enable_http_port_access is true. - enable_http_port_access (bool): - Optional. If true, enable http access to - specific ports on the cluster from external - sources. Defaults to false. - """ - - http_ports = proto.MapField( - proto.STRING, - proto.STRING, - number=1, - ) - enable_http_port_access = proto.Field( - proto.BOOL, - number=2, - ) - - -class AutoscalingConfig(proto.Message): - r"""Autoscaling Policy config associated with the cluster. - Attributes: - policy_uri (str): - Optional. The autoscaling policy used by the cluster. - - Only resource names including projectid and location - (region) are valid. 
Examples: - - - ``https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` - - ``projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` - - Note that the policy must be in the same project and - Dataproc region. - """ - - policy_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class EncryptionConfig(proto.Message): - r"""Encryption settings for the cluster. - Attributes: - gce_pd_kms_key_name (str): - Optional. The Cloud KMS key name to use for - PD disk encryption for all instances in the - cluster. - """ - - gce_pd_kms_key_name = proto.Field( - proto.STRING, - number=1, - ) - - -class GceClusterConfig(proto.Message): - r"""Common config settings for resources of Compute Engine - cluster instances, applicable to all instances in the cluster. - - Attributes: - zone_uri (str): - Optional. The zone where the Compute Engine cluster will be - located. On a create request, it is required in the "global" - region. If omitted in a non-global Dataproc region, the - service will pick a zone in the corresponding Compute Engine - region. On a get request, zone will always be present. - - A full URL, partial URI, or short name are valid. Examples: - - - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`` - - ``projects/[project_id]/zones/[zone]`` - - ``us-central1-f`` - network_uri (str): - Optional. The Compute Engine network to be used for machine - communications. Cannot be specified with subnetwork_uri. If - neither ``network_uri`` nor ``subnetwork_uri`` is specified, - the "default" network of the project is used, if it exists. - Cannot be a "Custom Subnet Network" (see `Using - Subnetworks `__ - for more information). - - A full URL, partial URI, or short name are valid. Examples: - - - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`` - - ``projects/[project_id]/regions/global/default`` - - ``default`` - subnetwork_uri (str): - Optional. The Compute Engine subnetwork to be used for - machine communications. Cannot be specified with - network_uri. - - A full URL, partial URI, or short name are valid. Examples: - - - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0`` - - ``projects/[project_id]/regions/us-east1/subnetworks/sub0`` - - ``sub0`` - internal_ip_only (bool): - Optional. If true, all instances in the cluster will only - have internal IP addresses. By default, clusters are not - restricted to internal IP addresses, and will have ephemeral - external IP addresses assigned to each instance. This - ``internal_ip_only`` restriction can only be enabled for - subnetwork enabled networks, and all off-cluster - dependencies must be configured to be accessible without - external IP addresses. - private_ipv6_google_access (google.cloud.dataproc_v1.types.GceClusterConfig.PrivateIpv6GoogleAccess): - Optional. The type of IPv6 access for a - cluster. - service_account (str): - Optional. The `Dataproc service - account `__ - (also see `VM Data Plane - identity `__) - used by Dataproc cluster VM instances to access Google Cloud - Platform services. - - If not specified, the `Compute Engine default service - account `__ - is used. - service_account_scopes (Sequence[str]): - Optional. The URIs of service account scopes to be included - in Compute Engine instances. 
The following base set of - scopes is always included: - - - https://www.googleapis.com/auth/cloud.useraccounts.readonly - - https://www.googleapis.com/auth/devstorage.read_write - - https://www.googleapis.com/auth/logging.write - - If no scopes are specified, the following defaults are also - provided: - - - https://www.googleapis.com/auth/bigquery - - https://www.googleapis.com/auth/bigtable.admin.table - - https://www.googleapis.com/auth/bigtable.data - - https://www.googleapis.com/auth/devstorage.full_control - tags (Sequence[str]): - The Compute Engine tags to add to all instances (see - `Tagging - instances `__). - metadata (Sequence[google.cloud.dataproc_v1.types.GceClusterConfig.MetadataEntry]): - The Compute Engine metadata entries to add to all instances - (see `Project and instance - metadata `__). - reservation_affinity (google.cloud.dataproc_v1.types.ReservationAffinity): - Optional. Reservation Affinity for consuming - Zonal reservation. - node_group_affinity (google.cloud.dataproc_v1.types.NodeGroupAffinity): - Optional. Node Group Affinity for sole-tenant - clusters. - shielded_instance_config (google.cloud.dataproc_v1.types.ShieldedInstanceConfig): - Optional. Shielded Instance Config for clusters using - `Compute Engine Shielded - VMs `__. - """ - class PrivateIpv6GoogleAccess(proto.Enum): - r"""``PrivateIpv6GoogleAccess`` controls whether and how Dataproc - cluster nodes can communicate with Google Services through gRPC over - IPv6. These values are directly mapped to corresponding values in - the `Compute Engine Instance - fields `__. - """ - PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED = 0 - INHERIT_FROM_SUBNETWORK = 1 - OUTBOUND = 2 - BIDIRECTIONAL = 3 - - zone_uri = proto.Field( - proto.STRING, - number=1, - ) - network_uri = proto.Field( - proto.STRING, - number=2, - ) - subnetwork_uri = proto.Field( - proto.STRING, - number=6, - ) - internal_ip_only = proto.Field( - proto.BOOL, - number=7, - ) - private_ipv6_google_access = proto.Field( - proto.ENUM, - number=12, - enum=PrivateIpv6GoogleAccess, - ) - service_account = proto.Field( - proto.STRING, - number=8, - ) - service_account_scopes = proto.RepeatedField( - proto.STRING, - number=3, - ) - tags = proto.RepeatedField( - proto.STRING, - number=4, - ) - metadata = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - reservation_affinity = proto.Field( - proto.MESSAGE, - number=11, - message='ReservationAffinity', - ) - node_group_affinity = proto.Field( - proto.MESSAGE, - number=13, - message='NodeGroupAffinity', - ) - shielded_instance_config = proto.Field( - proto.MESSAGE, - number=14, - message='ShieldedInstanceConfig', - ) - - -class NodeGroupAffinity(proto.Message): - r"""Node Group Affinity for clusters using sole-tenant node - groups. - - Attributes: - node_group_uri (str): - Required. The URI of a sole-tenant `node group - resource `__ - that the cluster will be created on. - - A full URL, partial URI, or node group name are valid. - Examples: - - - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`` - - ``projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1`` - - ``node-group-1`` - """ - - node_group_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class ShieldedInstanceConfig(proto.Message): - r"""Shielded Instance Config for clusters using `Compute Engine Shielded - VMs `__. - - Attributes: - enable_secure_boot (bool): - Optional. Defines whether instances have - Secure Boot enabled. - enable_vtpm (bool): - Optional. 
Defines whether instances have the - vTPM enabled. - enable_integrity_monitoring (bool): - Optional. Defines whether instances have - integrity monitoring enabled. - """ - - enable_secure_boot = proto.Field( - proto.BOOL, - number=1, - ) - enable_vtpm = proto.Field( - proto.BOOL, - number=2, - ) - enable_integrity_monitoring = proto.Field( - proto.BOOL, - number=3, - ) - - -class InstanceGroupConfig(proto.Message): - r"""The config settings for Compute Engine resources in - an instance group, such as a master or worker group. - - Attributes: - num_instances (int): - Optional. The number of VM instances in the instance group. - For `HA - cluster `__ - `master_config <#FIELDS.master_config>`__ groups, **must be - set to 3**. For standard cluster - `master_config <#FIELDS.master_config>`__ groups, **must be - set to 1**. - instance_names (Sequence[str]): - Output only. The list of instance names. Dataproc derives - the names from ``cluster_name``, ``num_instances``, and the - instance group. - image_uri (str): - Optional. The Compute Engine image resource used for cluster - instances. - - The URI can represent an image or image family. - - Image examples: - - - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`` - - ``projects/[project_id]/global/images/[image-id]`` - - ``image-id`` - - Image family examples. Dataproc will use the most recent - image from the family: - - - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`` - - ``projects/[project_id]/global/images/family/[custom-image-family-name]`` - - If the URI is unspecified, it will be inferred from - ``SoftwareConfig.image_version`` or the system default. - machine_type_uri (str): - Optional. The Compute Engine machine type used for cluster - instances. - - A full URL, partial URI, or short name are valid. Examples: - - - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` - - ``projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` - - ``n1-standard-2`` - - **Auto Zone Exception**: If you are using the Dataproc `Auto - Zone - Placement `__ - feature, you must use the short name of the machine type - resource, for example, ``n1-standard-2``. - disk_config (google.cloud.dataproc_v1.types.DiskConfig): - Optional. Disk option config settings. - is_preemptible (bool): - Output only. Specifies that this instance - group contains preemptible instances. - preemptibility (google.cloud.dataproc_v1.types.InstanceGroupConfig.Preemptibility): - Optional. Specifies the preemptibility of the instance - group. - - The default value for master and worker groups is - ``NON_PREEMPTIBLE``. This default cannot be changed. - - The default value for secondary instances is - ``PREEMPTIBLE``. - managed_group_config (google.cloud.dataproc_v1.types.ManagedGroupConfig): - Output only. The config for Compute Engine - Instance Group Manager that manages this group. - This is only used for preemptible instance - groups. - accelerators (Sequence[google.cloud.dataproc_v1.types.AcceleratorConfig]): - Optional. The Compute Engine accelerator - configuration for these instances. - min_cpu_platform (str): - Optional. Specifies the minimum cpu platform for the - Instance Group. See `Dataproc -> Minimum CPU - Platform `__. - """ - class Preemptibility(proto.Enum): - r"""Controls the use of [preemptible instances] - (https://cloud.google.com/compute/docs/instances/preemptible) within - the group. 
- """ - PREEMPTIBILITY_UNSPECIFIED = 0 - NON_PREEMPTIBLE = 1 - PREEMPTIBLE = 2 - - num_instances = proto.Field( - proto.INT32, - number=1, - ) - instance_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - image_uri = proto.Field( - proto.STRING, - number=3, - ) - machine_type_uri = proto.Field( - proto.STRING, - number=4, - ) - disk_config = proto.Field( - proto.MESSAGE, - number=5, - message='DiskConfig', - ) - is_preemptible = proto.Field( - proto.BOOL, - number=6, - ) - preemptibility = proto.Field( - proto.ENUM, - number=10, - enum=Preemptibility, - ) - managed_group_config = proto.Field( - proto.MESSAGE, - number=7, - message='ManagedGroupConfig', - ) - accelerators = proto.RepeatedField( - proto.MESSAGE, - number=8, - message='AcceleratorConfig', - ) - min_cpu_platform = proto.Field( - proto.STRING, - number=9, - ) - - -class ManagedGroupConfig(proto.Message): - r"""Specifies the resources used to actively manage an instance - group. - - Attributes: - instance_template_name (str): - Output only. The name of the Instance - Template used for the Managed Instance Group. - instance_group_manager_name (str): - Output only. The name of the Instance Group - Manager for this group. - """ - - instance_template_name = proto.Field( - proto.STRING, - number=1, - ) - instance_group_manager_name = proto.Field( - proto.STRING, - number=2, - ) - - -class AcceleratorConfig(proto.Message): - r"""Specifies the type and number of accelerator cards attached to the - instances of an instance. See `GPUs on Compute - Engine `__. - - Attributes: - accelerator_type_uri (str): - Full URL, partial URI, or short name of the accelerator type - resource to expose to this instance. See `Compute Engine - AcceleratorTypes `__. - - Examples: - - - ``https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` - - ``projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` - - ``nvidia-tesla-k80`` - - **Auto Zone Exception**: If you are using the Dataproc `Auto - Zone - Placement `__ - feature, you must use the short name of the accelerator type - resource, for example, ``nvidia-tesla-k80``. - accelerator_count (int): - The number of the accelerator cards of this - type exposed to this instance. - """ - - accelerator_type_uri = proto.Field( - proto.STRING, - number=1, - ) - accelerator_count = proto.Field( - proto.INT32, - number=2, - ) - - -class DiskConfig(proto.Message): - r"""Specifies the config of disk options for a group of VM - instances. - - Attributes: - boot_disk_type (str): - Optional. Type of the boot disk (default is "pd-standard"). - Valid values: "pd-balanced" (Persistent Disk Balanced Solid - State Drive), "pd-ssd" (Persistent Disk Solid State Drive), - or "pd-standard" (Persistent Disk Hard Disk Drive). See - `Disk - types `__. - boot_disk_size_gb (int): - Optional. Size in GB of the boot disk - (default is 500GB). - num_local_ssds (int): - Optional. Number of attached SSDs, from 0 to 4 (default is - 0). If SSDs are not attached, the boot disk is used to store - runtime logs and - `HDFS `__ - data. If one or more SSDs are attached, this runtime bulk - data is spread across them, and the boot disk contains only - basic config and installed binaries. 
- """ - - boot_disk_type = proto.Field( - proto.STRING, - number=3, - ) - boot_disk_size_gb = proto.Field( - proto.INT32, - number=1, - ) - num_local_ssds = proto.Field( - proto.INT32, - number=2, - ) - - -class NodeInitializationAction(proto.Message): - r"""Specifies an executable to run on a fully configured node and - a timeout period for executable completion. - - Attributes: - executable_file (str): - Required. Cloud Storage URI of executable - file. - execution_timeout (google.protobuf.duration_pb2.Duration): - Optional. Amount of time executable has to complete. Default - is 10 minutes (see JSON representation of - `Duration `__). - - Cluster creation fails with an explanatory error message - (the name of the executable that caused the error and the - exceeded timeout period) if the executable is not completed - at end of the timeout period. - """ - - executable_file = proto.Field( - proto.STRING, - number=1, - ) - execution_timeout = proto.Field( - proto.MESSAGE, - number=2, - message=duration_pb2.Duration, - ) - - -class ClusterStatus(proto.Message): - r"""The status of a cluster and its instances. - Attributes: - state (google.cloud.dataproc_v1.types.ClusterStatus.State): - Output only. The cluster's state. - detail (str): - Optional. Output only. Details of cluster's - state. - state_start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when this state was entered (see JSON - representation of - `Timestamp `__). - substate (google.cloud.dataproc_v1.types.ClusterStatus.Substate): - Output only. Additional state information - that includes status reported by the agent. - """ - class State(proto.Enum): - r"""The cluster state.""" - UNKNOWN = 0 - CREATING = 1 - RUNNING = 2 - ERROR = 3 - DELETING = 4 - UPDATING = 5 - STOPPING = 6 - STOPPED = 7 - STARTING = 8 - - class Substate(proto.Enum): - r"""The cluster substate.""" - UNSPECIFIED = 0 - UNHEALTHY = 1 - STALE_STATUS = 2 - - state = proto.Field( - proto.ENUM, - number=1, - enum=State, - ) - detail = proto.Field( - proto.STRING, - number=2, - ) - state_start_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - substate = proto.Field( - proto.ENUM, - number=4, - enum=Substate, - ) - - -class SecurityConfig(proto.Message): - r"""Security related configuration, including encryption, - Kerberos, etc. - - Attributes: - kerberos_config (google.cloud.dataproc_v1.types.KerberosConfig): - Optional. Kerberos related configuration. - identity_config (google.cloud.dataproc_v1.types.IdentityConfig): - Optional. Identity related configuration, - including service account based secure multi- - tenancy user mappings. - """ - - kerberos_config = proto.Field( - proto.MESSAGE, - number=1, - message='KerberosConfig', - ) - identity_config = proto.Field( - proto.MESSAGE, - number=2, - message='IdentityConfig', - ) - - -class KerberosConfig(proto.Message): - r"""Specifies Kerberos related configuration. - Attributes: - enable_kerberos (bool): - Optional. Flag to indicate whether to - Kerberize the cluster (default: false). Set this - field to true to enable Kerberos on a cluster. - root_principal_password_uri (str): - Optional. The Cloud Storage URI of a KMS - encrypted file containing the root principal - password. - kms_key_uri (str): - Optional. The uri of the KMS key used to - encrypt various sensitive files. - keystore_uri (str): - Optional. The Cloud Storage URI of the - keystore file used for SSL encryption. If not - provided, Dataproc will provide a self-signed - certificate. 
- truststore_uri (str): - Optional. The Cloud Storage URI of the - truststore file used for SSL encryption. If not - provided, Dataproc will provide a self-signed - certificate. - keystore_password_uri (str): - Optional. The Cloud Storage URI of a KMS - encrypted file containing the password to the - user provided keystore. For the self-signed - certificate, this password is generated by - Dataproc. - key_password_uri (str): - Optional. The Cloud Storage URI of a KMS - encrypted file containing the password to the - user provided key. For the self-signed - certificate, this password is generated by - Dataproc. - truststore_password_uri (str): - Optional. The Cloud Storage URI of a KMS - encrypted file containing the password to the - user provided truststore. For the self-signed - certificate, this password is generated by - Dataproc. - cross_realm_trust_realm (str): - Optional. The remote realm the Dataproc on- - luster KDC will trust, should the user enable - cross realm trust. - cross_realm_trust_kdc (str): - Optional. The KDC (IP or hostname) for the - remote trusted realm in a cross realm trust - relationship. - cross_realm_trust_admin_server (str): - Optional. The admin server (IP or hostname) - for the remote trusted realm in a cross realm - trust relationship. - cross_realm_trust_shared_password_uri (str): - Optional. The Cloud Storage URI of a KMS - encrypted file containing the shared password - between the on-cluster Kerberos realm and the - remote trusted realm, in a cross realm trust - relationship. - kdc_db_key_uri (str): - Optional. The Cloud Storage URI of a KMS - encrypted file containing the master key of the - KDC database. - tgt_lifetime_hours (int): - Optional. The lifetime of the ticket granting - ticket, in hours. If not specified, or user - specifies 0, then default value 10 will be used. - realm (str): - Optional. The name of the on-cluster Kerberos - realm. If not specified, the uppercased domain - of hostnames will be the realm. - """ - - enable_kerberos = proto.Field( - proto.BOOL, - number=1, - ) - root_principal_password_uri = proto.Field( - proto.STRING, - number=2, - ) - kms_key_uri = proto.Field( - proto.STRING, - number=3, - ) - keystore_uri = proto.Field( - proto.STRING, - number=4, - ) - truststore_uri = proto.Field( - proto.STRING, - number=5, - ) - keystore_password_uri = proto.Field( - proto.STRING, - number=6, - ) - key_password_uri = proto.Field( - proto.STRING, - number=7, - ) - truststore_password_uri = proto.Field( - proto.STRING, - number=8, - ) - cross_realm_trust_realm = proto.Field( - proto.STRING, - number=9, - ) - cross_realm_trust_kdc = proto.Field( - proto.STRING, - number=10, - ) - cross_realm_trust_admin_server = proto.Field( - proto.STRING, - number=11, - ) - cross_realm_trust_shared_password_uri = proto.Field( - proto.STRING, - number=12, - ) - kdc_db_key_uri = proto.Field( - proto.STRING, - number=13, - ) - tgt_lifetime_hours = proto.Field( - proto.INT32, - number=14, - ) - realm = proto.Field( - proto.STRING, - number=15, - ) - - -class IdentityConfig(proto.Message): - r"""Identity related configuration, including service account - based secure multi-tenancy user mappings. - - Attributes: - user_service_account_mapping (Sequence[google.cloud.dataproc_v1.types.IdentityConfig.UserServiceAccountMappingEntry]): - Required. Map of user to service account. 
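For illustration only: a hedged sketch of a ``SecurityConfig`` built from the Kerberos fields above; the KMS key and Cloud Storage URIs are placeholder values, not taken from this change:

    from google.cloud import dataproc_v1

    # Placeholder URIs; a real cluster needs an existing KMS key and an
    # encrypted root-principal password object in Cloud Storage.
    security_config = dataproc_v1.SecurityConfig(
        kerberos_config=dataproc_v1.KerberosConfig(
            enable_kerberos=True,
            kms_key_uri=(
                "projects/my-project/locations/global/"
                "keyRings/my-ring/cryptoKeys/my-key"
            ),
            root_principal_password_uri=(
                "gs://my-bucket/kerberos-root-password.encrypted"
            ),
        ),
    )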
- """ - - user_service_account_mapping = proto.MapField( - proto.STRING, - proto.STRING, - number=1, - ) - - -class SoftwareConfig(proto.Message): - r"""Specifies the selection and config of software inside the - cluster. - - Attributes: - image_version (str): - Optional. The version of software inside the cluster. It - must be one of the supported `Dataproc - Versions `__, - such as "1.2" (including a subminor version, such as - "1.2.29"), or the `"preview" - version `__. - If unspecified, it defaults to the latest Debian version. - properties (Sequence[google.cloud.dataproc_v1.types.SoftwareConfig.PropertiesEntry]): - Optional. The properties to set on daemon config files. - - Property keys are specified in ``prefix:property`` format, - for example ``core:hadoop.tmp.dir``. The following are - supported prefixes and their mappings: - - - capacity-scheduler: ``capacity-scheduler.xml`` - - core: ``core-site.xml`` - - distcp: ``distcp-default.xml`` - - hdfs: ``hdfs-site.xml`` - - hive: ``hive-site.xml`` - - mapred: ``mapred-site.xml`` - - pig: ``pig.properties`` - - spark: ``spark-defaults.conf`` - - yarn: ``yarn-site.xml`` - - For more information, see `Cluster - properties `__. - optional_components (Sequence[google.cloud.dataproc_v1.types.Component]): - Optional. The set of components to activate - on the cluster. - """ - - image_version = proto.Field( - proto.STRING, - number=1, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - optional_components = proto.RepeatedField( - proto.ENUM, - number=3, - enum=shared.Component, - ) - - -class LifecycleConfig(proto.Message): - r"""Specifies the cluster auto-delete schedule configuration. - Attributes: - idle_delete_ttl (google.protobuf.duration_pb2.Duration): - Optional. The duration to keep the cluster alive while - idling (when no jobs are running). Passing this threshold - will cause the cluster to be deleted. Minimum value is 5 - minutes; maximum value is 14 days (see JSON representation - of - `Duration `__). - auto_delete_time (google.protobuf.timestamp_pb2.Timestamp): - Optional. The time when cluster will be auto-deleted (see - JSON representation of - `Timestamp `__). - auto_delete_ttl (google.protobuf.duration_pb2.Duration): - Optional. The lifetime duration of cluster. The cluster will - be auto-deleted at the end of this period. Minimum value is - 10 minutes; maximum value is 14 days (see JSON - representation of - `Duration `__). - idle_start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time when cluster became idle (most recent - job finished) and became eligible for deletion due to - idleness (see JSON representation of - `Timestamp `__). - """ - - idle_delete_ttl = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - auto_delete_time = proto.Field( - proto.MESSAGE, - number=2, - oneof='ttl', - message=timestamp_pb2.Timestamp, - ) - auto_delete_ttl = proto.Field( - proto.MESSAGE, - number=3, - oneof='ttl', - message=duration_pb2.Duration, - ) - idle_start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class MetastoreConfig(proto.Message): - r"""Specifies a Metastore configuration. - Attributes: - dataproc_metastore_service (str): - Required. Resource name of an existing Dataproc Metastore - service. 
- - Example: - - - ``projects/[project_id]/locations/[dataproc_region]/services/[service-name]`` - """ - - dataproc_metastore_service = proto.Field( - proto.STRING, - number=1, - ) - - -class ClusterMetrics(proto.Message): - r"""Contains cluster daemon metrics, such as HDFS and YARN stats. - - **Beta Feature**: This report is available for testing purposes - only. It may be changed before final release. - - Attributes: - hdfs_metrics (Sequence[google.cloud.dataproc_v1.types.ClusterMetrics.HdfsMetricsEntry]): - The HDFS metrics. - yarn_metrics (Sequence[google.cloud.dataproc_v1.types.ClusterMetrics.YarnMetricsEntry]): - The YARN metrics. - """ - - hdfs_metrics = proto.MapField( - proto.STRING, - proto.INT64, - number=1, - ) - yarn_metrics = proto.MapField( - proto.STRING, - proto.INT64, - number=2, - ) - - -class CreateClusterRequest(proto.Message): - r"""A request to create a cluster. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster (google.cloud.dataproc_v1.types.Cluster): - Required. The cluster to create. - request_id (str): - Optional. A unique id used to identify the request. If the - server receives two - `CreateClusterRequest `__\ s - with the same id, then the second request will be ignored - and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - cluster = proto.Field( - proto.MESSAGE, - number=2, - message='Cluster', - ) - request_id = proto.Field( - proto.STRING, - number=4, - ) - - -class UpdateClusterRequest(proto.Message): - r"""A request to update a cluster. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster_name (str): - Required. The cluster name. - cluster (google.cloud.dataproc_v1.types.Cluster): - Required. The changes to the cluster. - graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): - Optional. Timeout for graceful YARN decomissioning. Graceful - decommissioning allows removing nodes from the cluster - without interrupting jobs in progress. Timeout specifies how - long to wait for jobs in progress to finish before - forcefully removing nodes (and potentially interrupting - jobs). Default timeout is 0 (for forceful decommission), and - the maximum allowed timeout is 1 day. (see JSON - representation of - `Duration `__). - - Only supported on Dataproc image versions 1.2 and higher. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Specifies the path, relative to ``Cluster``, of - the field to update. 
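For illustration only: a minimal end-to-end sketch of a ``CreateClusterRequest`` issued through ``ClusterControllerClient``, assuming the generated ``google.cloud.dataproc_v1`` package; the project, region, zone, and machine types are placeholder values:

    from google.cloud import dataproc_v1

    project_id, region = "my-project", "us-central1"  # placeholders

    client = dataproc_v1.ClusterControllerClient(
        client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
    )

    cluster = dataproc_v1.Cluster(
        project_id=project_id,
        cluster_name="example-cluster",
        config=dataproc_v1.ClusterConfig(
            gce_cluster_config=dataproc_v1.GceClusterConfig(
                zone_uri="us-central1-f",
            ),
            master_config=dataproc_v1.InstanceGroupConfig(
                num_instances=1, machine_type_uri="n1-standard-2"
            ),
            worker_config=dataproc_v1.InstanceGroupConfig(
                num_instances=2, machine_type_uri="n1-standard-2"
            ),
        ),
    )

    # create_cluster returns a long-running operation; result() blocks
    # until the cluster is ready or the operation fails.
    operation = client.create_cluster(
        request=dataproc_v1.CreateClusterRequest(
            project_id=project_id, region=region, cluster=cluster
        )
    )
    created = operation.result()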
For example, to change the number of - workers in a cluster to 5, the ``update_mask`` parameter - would be specified as - ``config.worker_config.num_instances``, and the ``PATCH`` - request body would specify the new value, as follows: - - :: - - { - "config":{ - "workerConfig":{ - "numInstances":"5" - } - } - } - - Similarly, to change the number of preemptible workers in a - cluster to 5, the ``update_mask`` parameter would be - ``config.secondary_worker_config.num_instances``, and the - ``PATCH`` request body would be set as follows: - - :: - - { - "config":{ - "secondaryWorkerConfig":{ - "numInstances":"5" - } - } - } - - Note: Currently, only the following fields can be updated: - - .. raw:: html - - - - - - - - - - - - - - - - - - - - - - - -
- Mask                                           Purpose
- labels                                         Update labels
- config.worker_config.num_instances             Resize primary worker group
- config.secondary_worker_config.num_instances   Resize secondary worker group
- config.autoscaling_config.policy_uri           Use, stop using, or change autoscaling policies
- request_id (str): - Optional. A unique id used to identify the request. If the - server receives two - `UpdateClusterRequest `__\ s - with the same id, then the second request will be ignored - and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=5, - ) - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - cluster = proto.Field( - proto.MESSAGE, - number=3, - message='Cluster', - ) - graceful_decommission_timeout = proto.Field( - proto.MESSAGE, - number=6, - message=duration_pb2.Duration, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=4, - message=field_mask_pb2.FieldMask, - ) - request_id = proto.Field( - proto.STRING, - number=7, - ) - - -class StopClusterRequest(proto.Message): - r"""A request to stop a cluster. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster_name (str): - Required. The cluster name. - cluster_uuid (str): - Optional. Specifying the ``cluster_uuid`` means the RPC will - fail (with error NOT_FOUND) if a cluster with the specified - UUID does not exist. - request_id (str): - Optional. A unique id used to identify the request. If the - server receives two - `StopClusterRequest `__\ s - with the same id, then the second request will be ignored - and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. - - Recommendation: Set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=2, - ) - cluster_name = proto.Field( - proto.STRING, - number=3, - ) - cluster_uuid = proto.Field( - proto.STRING, - number=4, - ) - request_id = proto.Field( - proto.STRING, - number=5, - ) - - -class StartClusterRequest(proto.Message): - r"""A request to start a cluster. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster_name (str): - Required. The cluster name. - cluster_uuid (str): - Optional. Specifying the ``cluster_uuid`` means the RPC will - fail (with error NOT_FOUND) if a cluster with the specified - UUID does not exist. - request_id (str): - Optional. A unique id used to identify the request. If the - server receives two - `StartClusterRequest `__\ s - with the same id, then the second request will be ignored - and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. - - Recommendation: Set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. 
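For illustration only: a sketch of resizing the primary worker group with ``UpdateClusterRequest`` and the ``config.worker_config.num_instances`` mask listed above; all identifiers are placeholder values:

    from google.cloud import dataproc_v1
    from google.protobuf import field_mask_pb2

    client = dataproc_v1.ClusterControllerClient(
        client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
    )

    # Only the masked path is read from `cluster`; other fields are ignored.
    operation = client.update_cluster(
        request=dataproc_v1.UpdateClusterRequest(
            project_id="my-project",       # placeholder
            region="us-central1",          # placeholder
            cluster_name="example-cluster",
            cluster=dataproc_v1.Cluster(
                config=dataproc_v1.ClusterConfig(
                    worker_config=dataproc_v1.InstanceGroupConfig(num_instances=5),
                ),
            ),
            update_mask=field_mask_pb2.FieldMask(
                paths=["config.worker_config.num_instances"]
            ),
        )
    )
    updated = operation.result()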
- """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=2, - ) - cluster_name = proto.Field( - proto.STRING, - number=3, - ) - cluster_uuid = proto.Field( - proto.STRING, - number=4, - ) - request_id = proto.Field( - proto.STRING, - number=5, - ) - - -class DeleteClusterRequest(proto.Message): - r"""A request to delete a cluster. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster_name (str): - Required. The cluster name. - cluster_uuid (str): - Optional. Specifying the ``cluster_uuid`` means the RPC - should fail (with error NOT_FOUND) if cluster with specified - UUID does not exist. - request_id (str): - Optional. A unique id used to identify the request. If the - server receives two - `DeleteClusterRequest `__\ s - with the same id, then the second request will be ignored - and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - cluster_uuid = proto.Field( - proto.STRING, - number=4, - ) - request_id = proto.Field( - proto.STRING, - number=5, - ) - - -class GetClusterRequest(proto.Message): - r"""Request to get the resource representation for a cluster in a - project. - - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster_name (str): - Required. The cluster name. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - - -class ListClustersRequest(proto.Message): - r"""A request to list the clusters in a project. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - filter (str): - Optional. A filter constraining the clusters to list. - Filters are case-sensitive and have the following syntax: - - field = value [AND [field = value]] ... - - where **field** is one of ``status.state``, ``clusterName``, - or ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** - can be ``*`` to match all values. ``status.state`` can be - one of the following: ``ACTIVE``, ``INACTIVE``, - ``CREATING``, ``RUNNING``, ``ERROR``, ``DELETING``, or - ``UPDATING``. ``ACTIVE`` contains the ``CREATING``, - ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains - the ``DELETING`` and ``ERROR`` states. ``clusterName`` is - the name of the cluster provided at creation time. Only the - logical ``AND`` operator is supported; space-separated items - are treated as having an implicit ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND clusterName = mycluster AND - labels.env = staging AND labels.starred = \* - page_size (int): - Optional. The standard List page size. 
- page_token (str): - Optional. The standard List page token. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=4, - ) - filter = proto.Field( - proto.STRING, - number=5, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - - -class ListClustersResponse(proto.Message): - r"""The list of all clusters in a project. - Attributes: - clusters (Sequence[google.cloud.dataproc_v1.types.Cluster]): - Output only. The clusters in the project. - next_page_token (str): - Output only. This token is included in the response if there - are more results to fetch. To fetch additional results, - provide this value as the ``page_token`` in a subsequent - ``ListClustersRequest``. - """ - - @property - def raw_page(self): - return self - - clusters = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Cluster', - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DiagnoseClusterRequest(proto.Message): - r"""A request to collect cluster diagnostic information. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster_name (str): - Required. The cluster name. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - - -class DiagnoseClusterResults(proto.Message): - r"""The location of diagnostic output. - Attributes: - output_uri (str): - Output only. The Cloud Storage URI of the - diagnostic output. The output report is a plain - text file with a summary of collected - diagnostics. - """ - - output_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class ReservationAffinity(proto.Message): - r"""Reservation Affinity for consuming Zonal reservation. - Attributes: - consume_reservation_type (google.cloud.dataproc_v1.types.ReservationAffinity.Type): - Optional. Type of reservation to consume - key (str): - Optional. Corresponds to the label key of - reservation resource. - values (Sequence[str]): - Optional. Corresponds to the label values of - reservation resource. - """ - class Type(proto.Enum): - r"""Indicates whether to consume capacity from an reservation or - not. - """ - TYPE_UNSPECIFIED = 0 - NO_RESERVATION = 1 - ANY_RESERVATION = 2 - SPECIFIC_RESERVATION = 3 - - consume_reservation_type = proto.Field( - proto.ENUM, - number=1, - enum=Type, - ) - key = proto.Field( - proto.STRING, - number=2, - ) - values = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/jobs.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/jobs.py deleted file mode 100644 index e61a7e97..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/jobs.py +++ /dev/null @@ -1,1368 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1', - manifest={ - 'LoggingConfig', - 'HadoopJob', - 'SparkJob', - 'PySparkJob', - 'QueryList', - 'HiveJob', - 'SparkSqlJob', - 'PigJob', - 'SparkRJob', - 'PrestoJob', - 'JobPlacement', - 'JobStatus', - 'JobReference', - 'YarnApplication', - 'Job', - 'JobScheduling', - 'SubmitJobRequest', - 'JobMetadata', - 'GetJobRequest', - 'ListJobsRequest', - 'UpdateJobRequest', - 'ListJobsResponse', - 'CancelJobRequest', - 'DeleteJobRequest', - }, -) - - -class LoggingConfig(proto.Message): - r"""The runtime logging config of the job. - Attributes: - driver_log_levels (Sequence[google.cloud.dataproc_v1.types.LoggingConfig.DriverLogLevelsEntry]): - The per-package log levels for the driver. - This may include "root" package name to - configure rootLogger. Examples: - 'com.google = FATAL', 'root = INFO', - 'org.apache = DEBUG' - """ - class Level(proto.Enum): - r"""The Log4j level for job execution. When running an `Apache - Hive `__ job, Cloud Dataproc configures the - Hive client to an equivalent verbosity level. - """ - LEVEL_UNSPECIFIED = 0 - ALL = 1 - TRACE = 2 - DEBUG = 3 - INFO = 4 - WARN = 5 - ERROR = 6 - FATAL = 7 - OFF = 8 - - driver_log_levels = proto.MapField( - proto.STRING, - proto.ENUM, - number=2, - enum=Level, - ) - - -class HadoopJob(proto.Message): - r"""A Dataproc job for running `Apache Hadoop - MapReduce `__ - jobs on `Apache Hadoop - YARN `__. - - Attributes: - main_jar_file_uri (str): - The HCFS URI of the jar file containing the - main class. Examples: - 'gs://foo-bucket/analytics-binaries/extract- - useful-metrics-mr.jar' 'hdfs:/tmp/test- - samples/custom-wordcount.jar' - 'file:///home/usr/lib/hadoop-mapreduce/hadoop- - mapreduce-examples.jar' - main_class (str): - The name of the driver's main class. The jar file containing - the class must be in the default CLASSPATH or specified in - ``jar_file_uris``. - args (Sequence[str]): - Optional. The arguments to pass to the driver. Do not - include arguments, such as ``-libjars`` or ``-Dfoo=bar``, - that can be set as job properties, since a collision may - occur that causes an incorrect job submission. - jar_file_uris (Sequence[str]): - Optional. Jar file URIs to add to the - CLASSPATHs of the Hadoop driver and tasks. - file_uris (Sequence[str]): - Optional. HCFS (Hadoop Compatible Filesystem) - URIs of files to be copied to the working - directory of Hadoop drivers and distributed - tasks. Useful for naively parallel tasks. - archive_uris (Sequence[str]): - Optional. HCFS URIs of archives to be - extracted in the working directory of Hadoop - drivers and tasks. Supported file types: .jar, - .tar, .tar.gz, .tgz, or .zip. - properties (Sequence[google.cloud.dataproc_v1.types.HadoopJob.PropertiesEntry]): - Optional. A mapping of property names to values, used to - configure Hadoop. Properties that conflict with values set - by the Dataproc API may be overwritten. 
Can include - properties set in /etc/hadoop/conf/*-site and classes in - user code. - logging_config (google.cloud.dataproc_v1.types.LoggingConfig): - Optional. The runtime log config for job - execution. - """ - - main_jar_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='driver', - ) - main_class = proto.Field( - proto.STRING, - number=2, - oneof='driver', - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=4, - ) - file_uris = proto.RepeatedField( - proto.STRING, - number=5, - ) - archive_uris = proto.RepeatedField( - proto.STRING, - number=6, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=8, - message='LoggingConfig', - ) - - -class SparkJob(proto.Message): - r"""A Dataproc job for running `Apache - Spark `__ applications on YARN. - - Attributes: - main_jar_file_uri (str): - The HCFS URI of the jar file that contains - the main class. - main_class (str): - The name of the driver's main class. The jar file that - contains the class must be in the default CLASSPATH or - specified in ``jar_file_uris``. - args (Sequence[str]): - Optional. The arguments to pass to the driver. Do not - include arguments, such as ``--conf``, that can be set as - job properties, since a collision may occur that causes an - incorrect job submission. - jar_file_uris (Sequence[str]): - Optional. HCFS URIs of jar files to add to - the CLASSPATHs of the Spark driver and tasks. - file_uris (Sequence[str]): - Optional. HCFS URIs of files to be placed in - the working directory of each executor. Useful - for naively parallel tasks. - archive_uris (Sequence[str]): - Optional. HCFS URIs of archives to be - extracted into the working directory of each - executor. Supported file types: .jar, .tar, - .tar.gz, .tgz, and .zip. - properties (Sequence[google.cloud.dataproc_v1.types.SparkJob.PropertiesEntry]): - Optional. A mapping of property names to - values, used to configure Spark. Properties that - conflict with values set by the Dataproc API may - be overwritten. Can include properties set in - /etc/spark/conf/spark-defaults.conf and classes - in user code. - logging_config (google.cloud.dataproc_v1.types.LoggingConfig): - Optional. The runtime log config for job - execution. - """ - - main_jar_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='driver', - ) - main_class = proto.Field( - proto.STRING, - number=2, - oneof='driver', - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=4, - ) - file_uris = proto.RepeatedField( - proto.STRING, - number=5, - ) - archive_uris = proto.RepeatedField( - proto.STRING, - number=6, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=8, - message='LoggingConfig', - ) - - -class PySparkJob(proto.Message): - r"""A Dataproc job for running `Apache - PySpark `__ - applications on YARN. - - Attributes: - main_python_file_uri (str): - Required. The HCFS URI of the main Python - file to use as the driver. Must be a .py file. - args (Sequence[str]): - Optional. The arguments to pass to the driver. Do not - include arguments, such as ``--conf``, that can be set as - job properties, since a collision may occur that causes an - incorrect job submission. - python_file_uris (Sequence[str]): - Optional. 
HCFS file URIs of Python files to - pass to the PySpark framework. Supported file - types: .py, .egg, and .zip. - jar_file_uris (Sequence[str]): - Optional. HCFS URIs of jar files to add to - the CLASSPATHs of the Python driver and tasks. - file_uris (Sequence[str]): - Optional. HCFS URIs of files to be placed in - the working directory of each executor. Useful - for naively parallel tasks. - archive_uris (Sequence[str]): - Optional. HCFS URIs of archives to be - extracted into the working directory of each - executor. Supported file types: .jar, .tar, - .tar.gz, .tgz, and .zip. - properties (Sequence[google.cloud.dataproc_v1.types.PySparkJob.PropertiesEntry]): - Optional. A mapping of property names to - values, used to configure PySpark. Properties - that conflict with values set by the Dataproc - API may be overwritten. Can include properties - set in - /etc/spark/conf/spark-defaults.conf and classes - in user code. - logging_config (google.cloud.dataproc_v1.types.LoggingConfig): - Optional. The runtime log config for job - execution. - """ - - main_python_file_uri = proto.Field( - proto.STRING, - number=1, - ) - args = proto.RepeatedField( - proto.STRING, - number=2, - ) - python_file_uris = proto.RepeatedField( - proto.STRING, - number=3, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=4, - ) - file_uris = proto.RepeatedField( - proto.STRING, - number=5, - ) - archive_uris = proto.RepeatedField( - proto.STRING, - number=6, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=8, - message='LoggingConfig', - ) - - -class QueryList(proto.Message): - r"""A list of queries to run on a cluster. - Attributes: - queries (Sequence[str]): - Required. The queries to execute. You do not need to end a - query expression with a semicolon. Multiple queries can be - specified in one string by separating each with a semicolon. - Here is an example of a Dataproc API snippet that uses a - QueryList to specify a HiveJob: - - :: - - "hiveJob": { - "queryList": { - "queries": [ - "query1", - "query2", - "query3;query4", - ] - } - } - """ - - queries = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class HiveJob(proto.Message): - r"""A Dataproc job for running `Apache - Hive `__ queries on YARN. - - Attributes: - query_file_uri (str): - The HCFS URI of the script that contains Hive - queries. - query_list (google.cloud.dataproc_v1.types.QueryList): - A list of queries. - continue_on_failure (bool): - Optional. Whether to continue executing queries if a query - fails. The default value is ``false``. Setting to ``true`` - can be useful when executing independent parallel queries. - script_variables (Sequence[google.cloud.dataproc_v1.types.HiveJob.ScriptVariablesEntry]): - Optional. Mapping of query variable names to values - (equivalent to the Hive command: ``SET name="value";``). - properties (Sequence[google.cloud.dataproc_v1.types.HiveJob.PropertiesEntry]): - Optional. A mapping of property names and values, used to - configure Hive. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in /etc/hadoop/conf/*-site.xml, - /etc/hive/conf/hive-site.xml, and classes in user code. - jar_file_uris (Sequence[str]): - Optional. HCFS URIs of jar files to add to - the CLASSPATH of the Hive server and Hadoop - MapReduce (MR) tasks. Can contain Hive SerDes - and UDFs. 
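For illustration only: a sketch of a ``HiveJob`` built from a ``QueryList`` as described above and attached to a ``Job``; the cluster name and query text are placeholder values:

    from google.cloud import dataproc_v1

    hive_job = dataproc_v1.HiveJob(
        query_list=dataproc_v1.QueryList(
            queries=["SHOW DATABASES", "SELECT * FROM logs LIMIT 10"]  # placeholders
        ),
        continue_on_failure=True,
        script_variables={"env": "staging"},
    )

    job = dataproc_v1.Job(
        placement=dataproc_v1.JobPlacement(cluster_name="example-cluster"),
        hive_job=hive_job,
    )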
- """ - - query_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='queries', - ) - query_list = proto.Field( - proto.MESSAGE, - number=2, - oneof='queries', - message='QueryList', - ) - continue_on_failure = proto.Field( - proto.BOOL, - number=3, - ) - script_variables = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=6, - ) - - -class SparkSqlJob(proto.Message): - r"""A Dataproc job for running `Apache Spark - SQL `__ queries. - - Attributes: - query_file_uri (str): - The HCFS URI of the script that contains SQL - queries. - query_list (google.cloud.dataproc_v1.types.QueryList): - A list of queries. - script_variables (Sequence[google.cloud.dataproc_v1.types.SparkSqlJob.ScriptVariablesEntry]): - Optional. Mapping of query variable names to values - (equivalent to the Spark SQL command: SET - ``name="value";``). - properties (Sequence[google.cloud.dataproc_v1.types.SparkSqlJob.PropertiesEntry]): - Optional. A mapping of property names to - values, used to configure Spark SQL's SparkConf. - Properties that conflict with values set by the - Dataproc API may be overwritten. - jar_file_uris (Sequence[str]): - Optional. HCFS URIs of jar files to be added - to the Spark CLASSPATH. - logging_config (google.cloud.dataproc_v1.types.LoggingConfig): - Optional. The runtime log config for job - execution. - """ - - query_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='queries', - ) - query_list = proto.Field( - proto.MESSAGE, - number=2, - oneof='queries', - message='QueryList', - ) - script_variables = proto.MapField( - proto.STRING, - proto.STRING, - number=3, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=56, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=6, - message='LoggingConfig', - ) - - -class PigJob(proto.Message): - r"""A Dataproc job for running `Apache Pig `__ - queries on YARN. - - Attributes: - query_file_uri (str): - The HCFS URI of the script that contains the - Pig queries. - query_list (google.cloud.dataproc_v1.types.QueryList): - A list of queries. - continue_on_failure (bool): - Optional. Whether to continue executing queries if a query - fails. The default value is ``false``. Setting to ``true`` - can be useful when executing independent parallel queries. - script_variables (Sequence[google.cloud.dataproc_v1.types.PigJob.ScriptVariablesEntry]): - Optional. Mapping of query variable names to values - (equivalent to the Pig command: ``name=[value]``). - properties (Sequence[google.cloud.dataproc_v1.types.PigJob.PropertiesEntry]): - Optional. A mapping of property names to values, used to - configure Pig. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in /etc/hadoop/conf/*-site.xml, - /etc/pig/conf/pig.properties, and classes in user code. - jar_file_uris (Sequence[str]): - Optional. HCFS URIs of jar files to add to - the CLASSPATH of the Pig Client and Hadoop - MapReduce (MR) tasks. Can contain Pig UDFs. - logging_config (google.cloud.dataproc_v1.types.LoggingConfig): - Optional. The runtime log config for job - execution. 
- """ - - query_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='queries', - ) - query_list = proto.Field( - proto.MESSAGE, - number=2, - oneof='queries', - message='QueryList', - ) - continue_on_failure = proto.Field( - proto.BOOL, - number=3, - ) - script_variables = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=6, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=7, - message='LoggingConfig', - ) - - -class SparkRJob(proto.Message): - r"""A Dataproc job for running `Apache - SparkR `__ - applications on YARN. - - Attributes: - main_r_file_uri (str): - Required. The HCFS URI of the main R file to - use as the driver. Must be a .R file. - args (Sequence[str]): - Optional. The arguments to pass to the driver. Do not - include arguments, such as ``--conf``, that can be set as - job properties, since a collision may occur that causes an - incorrect job submission. - file_uris (Sequence[str]): - Optional. HCFS URIs of files to be placed in - the working directory of each executor. Useful - for naively parallel tasks. - archive_uris (Sequence[str]): - Optional. HCFS URIs of archives to be - extracted into the working directory of each - executor. Supported file types: .jar, .tar, - .tar.gz, .tgz, and .zip. - properties (Sequence[google.cloud.dataproc_v1.types.SparkRJob.PropertiesEntry]): - Optional. A mapping of property names to - values, used to configure SparkR. Properties - that conflict with values set by the Dataproc - API may be overwritten. Can include properties - set in - /etc/spark/conf/spark-defaults.conf and classes - in user code. - logging_config (google.cloud.dataproc_v1.types.LoggingConfig): - Optional. The runtime log config for job - execution. - """ - - main_r_file_uri = proto.Field( - proto.STRING, - number=1, - ) - args = proto.RepeatedField( - proto.STRING, - number=2, - ) - file_uris = proto.RepeatedField( - proto.STRING, - number=3, - ) - archive_uris = proto.RepeatedField( - proto.STRING, - number=4, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=6, - message='LoggingConfig', - ) - - -class PrestoJob(proto.Message): - r"""A Dataproc job for running `Presto `__ - queries. **IMPORTANT**: The `Dataproc Presto Optional - Component `__ - must be enabled when the cluster is created to submit a Presto job - to the cluster. - - Attributes: - query_file_uri (str): - The HCFS URI of the script that contains SQL - queries. - query_list (google.cloud.dataproc_v1.types.QueryList): - A list of queries. - continue_on_failure (bool): - Optional. Whether to continue executing queries if a query - fails. The default value is ``false``. Setting to ``true`` - can be useful when executing independent parallel queries. - output_format (str): - Optional. The format in which query output - will be displayed. See the Presto documentation - for supported output formats - client_tags (Sequence[str]): - Optional. Presto client tags to attach to - this query - properties (Sequence[google.cloud.dataproc_v1.types.PrestoJob.PropertiesEntry]): - Optional. A mapping of property names to values. Used to set - Presto `session - properties `__ - Equivalent to using the --session flag in the Presto CLI - logging_config (google.cloud.dataproc_v1.types.LoggingConfig): - Optional. The runtime log config for job - execution. 
- """ - - query_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='queries', - ) - query_list = proto.Field( - proto.MESSAGE, - number=2, - oneof='queries', - message='QueryList', - ) - continue_on_failure = proto.Field( - proto.BOOL, - number=3, - ) - output_format = proto.Field( - proto.STRING, - number=4, - ) - client_tags = proto.RepeatedField( - proto.STRING, - number=5, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=7, - message='LoggingConfig', - ) - - -class JobPlacement(proto.Message): - r"""Dataproc job config. - Attributes: - cluster_name (str): - Required. The name of the cluster where the - job will be submitted. - cluster_uuid (str): - Output only. A cluster UUID generated by the - Dataproc service when the job is submitted. - cluster_labels (Sequence[google.cloud.dataproc_v1.types.JobPlacement.ClusterLabelsEntry]): - Optional. Cluster labels to identify a - cluster where the job will be submitted. - """ - - cluster_name = proto.Field( - proto.STRING, - number=1, - ) - cluster_uuid = proto.Field( - proto.STRING, - number=2, - ) - cluster_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=3, - ) - - -class JobStatus(proto.Message): - r"""Dataproc job status. - Attributes: - state (google.cloud.dataproc_v1.types.JobStatus.State): - Output only. A state message specifying the - overall job state. - details (str): - Optional. Output only. Job state details, - such as an error description if the state is - ERROR. - state_start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time when this state was - entered. - substate (google.cloud.dataproc_v1.types.JobStatus.Substate): - Output only. Additional state information, - which includes status reported by the agent. - """ - class State(proto.Enum): - r"""The job state.""" - STATE_UNSPECIFIED = 0 - PENDING = 1 - SETUP_DONE = 8 - RUNNING = 2 - CANCEL_PENDING = 3 - CANCEL_STARTED = 7 - CANCELLED = 4 - DONE = 5 - ERROR = 6 - ATTEMPT_FAILURE = 9 - - class Substate(proto.Enum): - r"""The job substate.""" - UNSPECIFIED = 0 - SUBMITTED = 1 - QUEUED = 2 - STALE_STATUS = 3 - - state = proto.Field( - proto.ENUM, - number=1, - enum=State, - ) - details = proto.Field( - proto.STRING, - number=2, - ) - state_start_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - substate = proto.Field( - proto.ENUM, - number=7, - enum=Substate, - ) - - -class JobReference(proto.Message): - r"""Encapsulates the full scoping used to reference a job. - Attributes: - project_id (str): - Optional. The ID of the Google Cloud Platform - project that the job belongs to. If specified, - must match the request project ID. - job_id (str): - Optional. The job ID, which must be unique within the - project. - - The ID must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), or hyphens (-). The maximum length is 100 - characters. - - If not specified by the caller, the job ID will be provided - by the server. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - job_id = proto.Field( - proto.STRING, - number=2, - ) - - -class YarnApplication(proto.Message): - r"""A YARN application created by a job. Application information is a - subset of - org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. - - **Beta Feature**: This report is available for testing purposes - only. It may be changed before final release. - - Attributes: - name (str): - Required. 
The application name. - state (google.cloud.dataproc_v1.types.YarnApplication.State): - Required. The application state. - progress (float): - Required. The numerical progress of the - application, from 1 to 100. - tracking_url (str): - Optional. The HTTP URL of the - ApplicationMaster, HistoryServer, or - TimelineServer that provides application- - specific information. The URL uses the internal - hostname, and requires a proxy server for - resolution and, possibly, access. - """ - class State(proto.Enum): - r"""The application state, corresponding to - YarnProtos.YarnApplicationStateProto. - """ - STATE_UNSPECIFIED = 0 - NEW = 1 - NEW_SAVING = 2 - SUBMITTED = 3 - ACCEPTED = 4 - RUNNING = 5 - FINISHED = 6 - FAILED = 7 - KILLED = 8 - - name = proto.Field( - proto.STRING, - number=1, - ) - state = proto.Field( - proto.ENUM, - number=2, - enum=State, - ) - progress = proto.Field( - proto.FLOAT, - number=3, - ) - tracking_url = proto.Field( - proto.STRING, - number=4, - ) - - -class Job(proto.Message): - r"""A Dataproc job resource. - Attributes: - reference (google.cloud.dataproc_v1.types.JobReference): - Optional. The fully qualified reference to the job, which - can be used to obtain the equivalent REST path of the job - resource. If this property is not specified when a job is - created, the server generates a job_id. - placement (google.cloud.dataproc_v1.types.JobPlacement): - Required. Job information, including how, - when, and where to run the job. - hadoop_job (google.cloud.dataproc_v1.types.HadoopJob): - Optional. Job is a Hadoop job. - spark_job (google.cloud.dataproc_v1.types.SparkJob): - Optional. Job is a Spark job. - pyspark_job (google.cloud.dataproc_v1.types.PySparkJob): - Optional. Job is a PySpark job. - hive_job (google.cloud.dataproc_v1.types.HiveJob): - Optional. Job is a Hive job. - pig_job (google.cloud.dataproc_v1.types.PigJob): - Optional. Job is a Pig job. - spark_r_job (google.cloud.dataproc_v1.types.SparkRJob): - Optional. Job is a SparkR job. - spark_sql_job (google.cloud.dataproc_v1.types.SparkSqlJob): - Optional. Job is a SparkSql job. - presto_job (google.cloud.dataproc_v1.types.PrestoJob): - Optional. Job is a Presto job. - status (google.cloud.dataproc_v1.types.JobStatus): - Output only. The job status. Additional application-specific - status information may be contained in the type_job and - yarn_applications fields. - status_history (Sequence[google.cloud.dataproc_v1.types.JobStatus]): - Output only. The previous job status. - yarn_applications (Sequence[google.cloud.dataproc_v1.types.YarnApplication]): - Output only. The collection of YARN applications spun up by - this job. - - **Beta** Feature: This report is available for testing - purposes only. It may be changed before final release. - driver_output_resource_uri (str): - Output only. A URI pointing to the location - of the stdout of the job's driver program. - driver_control_files_uri (str): - Output only. If present, the location of miscellaneous - control files which may be used as part of job setup and - handling. If not present, control files may be placed in the - same location as ``driver_output_uri``. - labels (Sequence[google.cloud.dataproc_v1.types.Job.LabelsEntry]): - Optional. The labels to associate with this job. Label - **keys** must contain 1 to 63 characters, and must conform - to `RFC 1035 `__. - Label **values** may be empty, but, if present, must contain - 1 to 63 characters, and must conform to `RFC - 1035 `__. No more than - 32 labels can be associated with a job. 
- scheduling (google.cloud.dataproc_v1.types.JobScheduling): - Optional. Job scheduling configuration. - job_uuid (str): - Output only. A UUID that uniquely identifies a job within - the project over time. This is in contrast to a - user-settable reference.job_id that may be reused over time. - done (bool): - Output only. Indicates whether the job is completed. If the - value is ``false``, the job is still in progress. If - ``true``, the job is completed, and ``status.state`` field - will indicate if it was successful, failed, or cancelled. - """ - - reference = proto.Field( - proto.MESSAGE, - number=1, - message='JobReference', - ) - placement = proto.Field( - proto.MESSAGE, - number=2, - message='JobPlacement', - ) - hadoop_job = proto.Field( - proto.MESSAGE, - number=3, - oneof='type_job', - message='HadoopJob', - ) - spark_job = proto.Field( - proto.MESSAGE, - number=4, - oneof='type_job', - message='SparkJob', - ) - pyspark_job = proto.Field( - proto.MESSAGE, - number=5, - oneof='type_job', - message='PySparkJob', - ) - hive_job = proto.Field( - proto.MESSAGE, - number=6, - oneof='type_job', - message='HiveJob', - ) - pig_job = proto.Field( - proto.MESSAGE, - number=7, - oneof='type_job', - message='PigJob', - ) - spark_r_job = proto.Field( - proto.MESSAGE, - number=21, - oneof='type_job', - message='SparkRJob', - ) - spark_sql_job = proto.Field( - proto.MESSAGE, - number=12, - oneof='type_job', - message='SparkSqlJob', - ) - presto_job = proto.Field( - proto.MESSAGE, - number=23, - oneof='type_job', - message='PrestoJob', - ) - status = proto.Field( - proto.MESSAGE, - number=8, - message='JobStatus', - ) - status_history = proto.RepeatedField( - proto.MESSAGE, - number=13, - message='JobStatus', - ) - yarn_applications = proto.RepeatedField( - proto.MESSAGE, - number=9, - message='YarnApplication', - ) - driver_output_resource_uri = proto.Field( - proto.STRING, - number=17, - ) - driver_control_files_uri = proto.Field( - proto.STRING, - number=15, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=18, - ) - scheduling = proto.Field( - proto.MESSAGE, - number=20, - message='JobScheduling', - ) - job_uuid = proto.Field( - proto.STRING, - number=22, - ) - done = proto.Field( - proto.BOOL, - number=24, - ) - - -class JobScheduling(proto.Message): - r"""Job scheduling options. - Attributes: - max_failures_per_hour (int): - Optional. Maximum number of times per hour a - driver may be restarted as a result of driver - exiting with non-zero code before job is - reported failed. - - A job may be reported as thrashing if driver - exits with non-zero code 4 times within 10 - minute window. - - Maximum value is 10. - max_failures_total (int): - Optional. Maximum number of times in total a - driver may be restarted as a result of driver - exiting with non-zero code before job is - reported failed. Maximum value is 240. - """ - - max_failures_per_hour = proto.Field( - proto.INT32, - number=1, - ) - max_failures_total = proto.Field( - proto.INT32, - number=2, - ) - - -class SubmitJobRequest(proto.Message): - r"""A request to submit a job. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - job (google.cloud.dataproc_v1.types.Job): - Required. The job resource. - request_id (str): - Optional. A unique id used to identify the request. 
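For illustration only: a hedged sketch of submitting a ``PySparkJob`` through ``JobControllerClient.submit_job`` using the ``Job`` messages above; the bucket paths and identifiers are placeholder values:

    from google.cloud import dataproc_v1

    client = dataproc_v1.JobControllerClient(
        client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
    )

    job = dataproc_v1.Job(
        placement=dataproc_v1.JobPlacement(cluster_name="example-cluster"),
        pyspark_job=dataproc_v1.PySparkJob(
            main_python_file_uri="gs://my-bucket/wordcount.py",  # placeholder
            args=["gs://my-bucket/input/", "gs://my-bucket/output/"],
        ),
    )

    submitted = client.submit_job(
        request={"project_id": "my-project", "region": "us-central1", "job": job}
    )
    print(submitted.reference.job_id)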
If the - server receives two - `SubmitJobRequest `__\ s - with the same id, then the second request will be ignored - and the first [Job][google.cloud.dataproc.v1.Job] created - and stored in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - job = proto.Field( - proto.MESSAGE, - number=2, - message='Job', - ) - request_id = proto.Field( - proto.STRING, - number=4, - ) - - -class JobMetadata(proto.Message): - r"""Job Operation metadata. - Attributes: - job_id (str): - Output only. The job id. - status (google.cloud.dataproc_v1.types.JobStatus): - Output only. Most recent job status. - operation_type (str): - Output only. Operation type. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Job submission time. - """ - - job_id = proto.Field( - proto.STRING, - number=1, - ) - status = proto.Field( - proto.MESSAGE, - number=2, - message='JobStatus', - ) - operation_type = proto.Field( - proto.STRING, - number=3, - ) - start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class GetJobRequest(proto.Message): - r"""A request to get the resource representation for a job in a - project. - - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - job_id (str): - Required. The job ID. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - job_id = proto.Field( - proto.STRING, - number=2, - ) - - -class ListJobsRequest(proto.Message): - r"""A request to list jobs in a project. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - page_size (int): - Optional. The number of results to return in - each response. - page_token (str): - Optional. The page token, returned by a - previous call, to request the next page of - results. - cluster_name (str): - Optional. If set, the returned jobs list - includes only jobs that were submitted to the - named cluster. - job_state_matcher (google.cloud.dataproc_v1.types.ListJobsRequest.JobStateMatcher): - Optional. Specifies enumerated categories of jobs to list. - (default = match ALL jobs). - - If ``filter`` is provided, ``jobStateMatcher`` will be - ignored. - filter (str): - Optional. A filter constraining the jobs to list. Filters - are case-sensitive and have the following syntax: - - [field = value] AND [field [= value]] ... - - where **field** is ``status.state`` or ``labels.[KEY]``, and - ``[KEY]`` is a label key. **value** can be ``*`` to match - all values. ``status.state`` can be either ``ACTIVE`` or - ``NON_ACTIVE``. Only the logical ``AND`` operator is - supported; space-separated items are treated as having an - implicit ``AND`` operator. 
- - Example filter: - - status.state = ACTIVE AND labels.env = staging AND - labels.starred = \* - """ - class JobStateMatcher(proto.Enum): - r"""A matcher that specifies categories of job states.""" - ALL = 0 - ACTIVE = 1 - NON_ACTIVE = 2 - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=6, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - cluster_name = proto.Field( - proto.STRING, - number=4, - ) - job_state_matcher = proto.Field( - proto.ENUM, - number=5, - enum=JobStateMatcher, - ) - filter = proto.Field( - proto.STRING, - number=7, - ) - - -class UpdateJobRequest(proto.Message): - r"""A request to update a job. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - job_id (str): - Required. The job ID. - job (google.cloud.dataproc_v1.types.Job): - Required. The changes to the job. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Specifies the path, relative to Job, of the field - to update. For example, to update the labels of a Job the - update_mask parameter would be specified as labels, and the - ``PATCH`` request body would specify the new value. Note: - Currently, labels is the only field that can be updated. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=2, - ) - job_id = proto.Field( - proto.STRING, - number=3, - ) - job = proto.Field( - proto.MESSAGE, - number=4, - message='Job', - ) - update_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListJobsResponse(proto.Message): - r"""A list of jobs in a project. - Attributes: - jobs (Sequence[google.cloud.dataproc_v1.types.Job]): - Output only. Jobs list. - next_page_token (str): - Optional. This token is included in the response if there - are more results to fetch. To fetch additional results, - provide this value as the ``page_token`` in a subsequent - ListJobsRequest. - """ - - @property - def raw_page(self): - return self - - jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Job', - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class CancelJobRequest(proto.Message): - r"""A request to cancel a job. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - job_id (str): - Required. The job ID. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - job_id = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteJobRequest(proto.Message): - r"""A request to delete a job. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - job_id (str): - Required. The job ID. 
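# --- Editor's note: a hedged sketch of the list/cancel/delete requests described in
# this module, using the filter syntax documented above; project, region, and job ID
# values are placeholders.
from google.cloud import dataproc_v1

client = dataproc_v1.JobControllerClient(
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
)
# list_jobs returns a pager, so iterating fetches additional pages lazily.
for job in client.list_jobs(request={
    "project_id": "my-project",
    "region": "us-central1",
    "filter": "status.state = ACTIVE AND labels.env = staging",
}):
    print(job.reference.job_id, job.status.state.name)

# Cancel, then delete, a specific job by its ID.
client.cancel_job(request={"project_id": "my-project", "region": "us-central1", "job_id": "job-1234"})
client.delete_job(request={"project_id": "my-project", "region": "us-central1", "job_id": "job-1234"})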
- """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - job_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/operations.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/operations.py deleted file mode 100644 index e3894a2a..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/operations.py +++ /dev/null @@ -1,133 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1', - manifest={ - 'ClusterOperationStatus', - 'ClusterOperationMetadata', - }, -) - - -class ClusterOperationStatus(proto.Message): - r"""The status of the operation. - Attributes: - state (google.cloud.dataproc_v1.types.ClusterOperationStatus.State): - Output only. A message containing the - operation state. - inner_state (str): - Output only. A message containing the - detailed operation state. - details (str): - Output only. A message containing any - operation metadata details. - state_start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time this state was entered. - """ - class State(proto.Enum): - r"""The operation state.""" - UNKNOWN = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - - state = proto.Field( - proto.ENUM, - number=1, - enum=State, - ) - inner_state = proto.Field( - proto.STRING, - number=2, - ) - details = proto.Field( - proto.STRING, - number=3, - ) - state_start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class ClusterOperationMetadata(proto.Message): - r"""Metadata describing the operation. - Attributes: - cluster_name (str): - Output only. Name of the cluster for the - operation. - cluster_uuid (str): - Output only. Cluster UUID for the operation. - status (google.cloud.dataproc_v1.types.ClusterOperationStatus): - Output only. Current operation status. - status_history (Sequence[google.cloud.dataproc_v1.types.ClusterOperationStatus]): - Output only. The previous operation status. - operation_type (str): - Output only. The operation type. - description (str): - Output only. Short description of operation. - labels (Sequence[google.cloud.dataproc_v1.types.ClusterOperationMetadata.LabelsEntry]): - Output only. Labels associated with the - operation - warnings (Sequence[str]): - Output only. Errors encountered during - operation execution. 
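# --- Editor's note: a hedged sketch of reading the ClusterOperationMetadata described
# above from a long-running cluster operation; project, region, and cluster values are
# placeholders.
from google.cloud import dataproc_v1

cluster_client = dataproc_v1.ClusterControllerClient(
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
)
operation = cluster_client.create_cluster(request={
    "project_id": "my-project",
    "region": "us-central1",
    "cluster": {
        "cluster_name": "example-cluster",
        "config": {
            "master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-2"},
            "worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-2"},
        },
    },
})
# The operation's metadata is a ClusterOperationMetadata message.
metadata = operation.metadata
print(metadata.operation_type, metadata.status.state.name, list(metadata.warnings))
operation.result()  # Block until the cluster is ready.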
- """ - - cluster_name = proto.Field( - proto.STRING, - number=7, - ) - cluster_uuid = proto.Field( - proto.STRING, - number=8, - ) - status = proto.Field( - proto.MESSAGE, - number=9, - message='ClusterOperationStatus', - ) - status_history = proto.RepeatedField( - proto.MESSAGE, - number=10, - message='ClusterOperationStatus', - ) - operation_type = proto.Field( - proto.STRING, - number=11, - ) - description = proto.Field( - proto.STRING, - number=12, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=13, - ) - warnings = proto.RepeatedField( - proto.STRING, - number=14, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/shared.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/shared.py deleted file mode 100644 index 69371c0d..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/shared.py +++ /dev/null @@ -1,46 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1', - manifest={ - 'Component', - }, -) - - -class Component(proto.Enum): - r"""Cluster components that can be activated. - Next ID: 16. - """ - COMPONENT_UNSPECIFIED = 0 - ANACONDA = 5 - DOCKER = 13 - DRUID = 9 - FLINK = 14 - HBASE = 11 - HIVE_WEBHCAT = 3 - JUPYTER = 1 - PRESTO = 6 - RANGER = 12 - SOLR = 10 - ZEPPELIN = 4 - ZOOKEEPER = 8 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/workflow_templates.py b/owl-bot-staging/v1/google/cloud/dataproc_v1/types/workflow_templates.py deleted file mode 100644 index 1c23ff3f..00000000 --- a/owl-bot-staging/v1/google/cloud/dataproc_v1/types/workflow_templates.py +++ /dev/null @@ -1,1050 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.dataproc_v1.types import clusters -from google.cloud.dataproc_v1.types import jobs as gcd_jobs -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1', - manifest={ - 'WorkflowTemplate', - 'WorkflowTemplatePlacement', - 'ManagedCluster', - 'ClusterSelector', - 'OrderedJob', - 'TemplateParameter', - 'ParameterValidation', - 'RegexValidation', - 'ValueValidation', - 'WorkflowMetadata', - 'ClusterOperation', - 'WorkflowGraph', - 'WorkflowNode', - 'CreateWorkflowTemplateRequest', - 'GetWorkflowTemplateRequest', - 'InstantiateWorkflowTemplateRequest', - 'InstantiateInlineWorkflowTemplateRequest', - 'UpdateWorkflowTemplateRequest', - 'ListWorkflowTemplatesRequest', - 'ListWorkflowTemplatesResponse', - 'DeleteWorkflowTemplateRequest', - }, -) - - -class WorkflowTemplate(proto.Message): - r"""A Dataproc workflow template resource. - Attributes: - id (str): - - name (str): - Output only. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates``, the resource - name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): - Optional. Used to perform a consistent read-modify-write. - - This field should be left blank for a - ``CreateWorkflowTemplate`` request. It is required for an - ``UpdateWorkflowTemplate`` request, and must match the - current server version. A typical update template flow would - fetch the current template with a ``GetWorkflowTemplate`` - request, which will return the current template with the - ``version`` field filled in with the current server version. - The user updates other fields in the template, then returns - it as part of the ``UpdateWorkflowTemplate`` request. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time template was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time template was last - updated. - labels (Sequence[google.cloud.dataproc_v1.types.WorkflowTemplate.LabelsEntry]): - Optional. The labels to associate with this template. These - labels will be propagated to all jobs and clusters created - by the workflow instance. - - Label **keys** must contain 1 to 63 characters, and must - conform to `RFC - 1035 `__. - - Label **values** may be empty, but, if present, must contain - 1 to 63 characters, and must conform to `RFC - 1035 `__. - - No more than 32 labels can be associated with a template. - placement (google.cloud.dataproc_v1.types.WorkflowTemplatePlacement): - Required. WorkflowTemplate scheduling - information. - jobs (Sequence[google.cloud.dataproc_v1.types.OrderedJob]): - Required. The Directed Acyclic Graph of Jobs - to submit. - parameters (Sequence[google.cloud.dataproc_v1.types.TemplateParameter]): - Optional. Template parameters whose values - are substituted into the template. Values for - parameters must be provided when the template is - instantiated. - dag_timeout (google.protobuf.duration_pb2.Duration): - Optional. 
Timeout duration for the DAG of jobs, expressed in - seconds (see `JSON representation of - duration `__). - The timeout duration must be from 10 minutes ("600s") to 24 - hours ("86400s"). The timer begins when the first job is - submitted. If the workflow is running at the end of the - timeout period, any remaining jobs are cancelled, the - workflow is ended, and if the workflow was running on a - `managed - cluster `__, - the cluster is deleted. - """ - - id = proto.Field( - proto.STRING, - number=2, - ) - name = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.INT32, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - placement = proto.Field( - proto.MESSAGE, - number=7, - message='WorkflowTemplatePlacement', - ) - jobs = proto.RepeatedField( - proto.MESSAGE, - number=8, - message='OrderedJob', - ) - parameters = proto.RepeatedField( - proto.MESSAGE, - number=9, - message='TemplateParameter', - ) - dag_timeout = proto.Field( - proto.MESSAGE, - number=10, - message=duration_pb2.Duration, - ) - - -class WorkflowTemplatePlacement(proto.Message): - r"""Specifies workflow execution target. - - Either ``managed_cluster`` or ``cluster_selector`` is required. - - Attributes: - managed_cluster (google.cloud.dataproc_v1.types.ManagedCluster): - A cluster that is managed by the workflow. - cluster_selector (google.cloud.dataproc_v1.types.ClusterSelector): - Optional. A selector that chooses target - cluster for jobs based on metadata. - - The selector is evaluated at the time each job - is submitted. - """ - - managed_cluster = proto.Field( - proto.MESSAGE, - number=1, - oneof='placement', - message='ManagedCluster', - ) - cluster_selector = proto.Field( - proto.MESSAGE, - number=2, - oneof='placement', - message='ClusterSelector', - ) - - -class ManagedCluster(proto.Message): - r"""Cluster that is managed by the workflow. - Attributes: - cluster_name (str): - Required. The cluster name prefix. A unique - cluster name will be formed by appending a - random suffix. - The name must contain only lower-case letters - (a-z), numbers (0-9), and hyphens (-). Must - begin with a letter. Cannot begin or end with - hyphen. Must consist of between 2 and 35 - characters. - config (google.cloud.dataproc_v1.types.ClusterConfig): - Required. The cluster configuration. - labels (Sequence[google.cloud.dataproc_v1.types.ManagedCluster.LabelsEntry]): - Optional. The labels to associate with this cluster. - - Label keys must be between 1 and 63 characters long, and - must conform to the following PCRE regular expression: - [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - - Label values must be between 1 and 63 characters long, and - must conform to the following PCRE regular expression: - [\p{Ll}\p{Lo}\p{N}_-]{0,63} - - No more than 32 labels can be associated with a given - cluster. - """ - - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - config = proto.Field( - proto.MESSAGE, - number=3, - message=clusters.ClusterConfig, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - - -class ClusterSelector(proto.Message): - r"""A selector that chooses target cluster for jobs based on - metadata. - - Attributes: - zone (str): - Optional. The zone where workflow process - executes. 
This parameter does not affect the - selection of the cluster. - If unspecified, the zone of the first cluster - matching the selector is used. - cluster_labels (Sequence[google.cloud.dataproc_v1.types.ClusterSelector.ClusterLabelsEntry]): - Required. The cluster labels. Cluster must - have all labels to match. - """ - - zone = proto.Field( - proto.STRING, - number=1, - ) - cluster_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - - -class OrderedJob(proto.Message): - r"""A job executed by the workflow. - Attributes: - step_id (str): - Required. The step id. The id must be unique among all jobs - within the template. - - The step id is used as prefix for job id, as job - ``goog-dataproc-workflow-step-id`` label, and in - [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] - field from other steps. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). Cannot begin or end with - underscore or hyphen. Must consist of between 3 and 50 - characters. - hadoop_job (google.cloud.dataproc_v1.types.HadoopJob): - Optional. Job is a Hadoop job. - spark_job (google.cloud.dataproc_v1.types.SparkJob): - Optional. Job is a Spark job. - pyspark_job (google.cloud.dataproc_v1.types.PySparkJob): - Optional. Job is a PySpark job. - hive_job (google.cloud.dataproc_v1.types.HiveJob): - Optional. Job is a Hive job. - pig_job (google.cloud.dataproc_v1.types.PigJob): - Optional. Job is a Pig job. - spark_r_job (google.cloud.dataproc_v1.types.SparkRJob): - Optional. Job is a SparkR job. - spark_sql_job (google.cloud.dataproc_v1.types.SparkSqlJob): - Optional. Job is a SparkSql job. - presto_job (google.cloud.dataproc_v1.types.PrestoJob): - Optional. Job is a Presto job. - labels (Sequence[google.cloud.dataproc_v1.types.OrderedJob.LabelsEntry]): - Optional. The labels to associate with this job. - - Label keys must be between 1 and 63 characters long, and - must conform to the following regular expression: - [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - - Label values must be between 1 and 63 characters long, and - must conform to the following regular expression: - [\p{Ll}\p{Lo}\p{N}_-]{0,63} - - No more than 32 labels can be associated with a given job. - scheduling (google.cloud.dataproc_v1.types.JobScheduling): - Optional. Job scheduling configuration. - prerequisite_step_ids (Sequence[str]): - Optional. The optional list of prerequisite job step_ids. If - not specified, the job will start at the beginning of - workflow. 
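# --- Editor's note: a hedged sketch of wiring OrderedJob steps together with
# prerequisite_step_ids, as described above, via an inline (single-use) workflow
# template; project, region, bucket, and step names are placeholders.
from google.cloud import dataproc_v1

region = "us-central1"
workflow_client = dataproc_v1.WorkflowTemplateServiceClient(
    client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
)
template = {
    "placement": {
        "managed_cluster": {
            # This is a name prefix; a random suffix is appended, per ManagedCluster above.
            "cluster_name": "ephemeral-cluster",
            "config": {},
        }
    },
    "jobs": [
        {"step_id": "prepare", "pyspark_job": {"main_python_file_uri": "gs://my-bucket/prepare.py"}},
        {
            "step_id": "train",
            "pyspark_job": {"main_python_file_uri": "gs://my-bucket/train.py"},
            # "train" starts only after "prepare" completes.
            "prerequisite_step_ids": ["prepare"],
        },
    ],
}
operation = workflow_client.instantiate_inline_workflow_template(
    request={"parent": f"projects/my-project/regions/{region}", "template": template}
)
operation.result()  # Wait for the whole DAG to finish.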
- """ - - step_id = proto.Field( - proto.STRING, - number=1, - ) - hadoop_job = proto.Field( - proto.MESSAGE, - number=2, - oneof='job_type', - message=gcd_jobs.HadoopJob, - ) - spark_job = proto.Field( - proto.MESSAGE, - number=3, - oneof='job_type', - message=gcd_jobs.SparkJob, - ) - pyspark_job = proto.Field( - proto.MESSAGE, - number=4, - oneof='job_type', - message=gcd_jobs.PySparkJob, - ) - hive_job = proto.Field( - proto.MESSAGE, - number=5, - oneof='job_type', - message=gcd_jobs.HiveJob, - ) - pig_job = proto.Field( - proto.MESSAGE, - number=6, - oneof='job_type', - message=gcd_jobs.PigJob, - ) - spark_r_job = proto.Field( - proto.MESSAGE, - number=11, - oneof='job_type', - message=gcd_jobs.SparkRJob, - ) - spark_sql_job = proto.Field( - proto.MESSAGE, - number=7, - oneof='job_type', - message=gcd_jobs.SparkSqlJob, - ) - presto_job = proto.Field( - proto.MESSAGE, - number=12, - oneof='job_type', - message=gcd_jobs.PrestoJob, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=8, - ) - scheduling = proto.Field( - proto.MESSAGE, - number=9, - message=gcd_jobs.JobScheduling, - ) - prerequisite_step_ids = proto.RepeatedField( - proto.STRING, - number=10, - ) - - -class TemplateParameter(proto.Message): - r"""A configurable parameter that replaces one or more fields in - the template. Parameterizable fields: - - Labels - - File uris - - Job properties - - Job arguments - - Script variables - - Main class (in HadoopJob and SparkJob) - - Zone (in ClusterSelector) - - Attributes: - name (str): - Required. Parameter name. The parameter name is used as the - key, and paired with the parameter value, which are passed - to the template when the template is instantiated. The name - must contain only capital letters (A-Z), numbers (0-9), and - underscores (_), and must not start with a number. The - maximum length is 40 characters. - fields (Sequence[str]): - Required. Paths to all fields that the parameter replaces. A - field is allowed to appear in at most one parameter's list - of field paths. - - A field path is similar in syntax to a - [google.protobuf.FieldMask][google.protobuf.FieldMask]. For - example, a field path that references the zone field of a - workflow template's cluster selector would be specified as - ``placement.clusterSelector.zone``. - - Also, field paths can reference fields using the following - syntax: - - - Values in maps can be referenced by key: - - - labels['key'] - - placement.clusterSelector.clusterLabels['key'] - - placement.managedCluster.labels['key'] - - placement.clusterSelector.clusterLabels['key'] - - jobs['step-id'].labels['key'] - - - Jobs in the jobs list can be referenced by step-id: - - - jobs['step-id'].hadoopJob.mainJarFileUri - - jobs['step-id'].hiveJob.queryFileUri - - jobs['step-id'].pySparkJob.mainPythonFileUri - - jobs['step-id'].hadoopJob.jarFileUris[0] - - jobs['step-id'].hadoopJob.archiveUris[0] - - jobs['step-id'].hadoopJob.fileUris[0] - - jobs['step-id'].pySparkJob.pythonFileUris[0] - - - Items in repeated fields can be referenced by a - zero-based index: - - - jobs['step-id'].sparkJob.args[0] - - - Other examples: - - - jobs['step-id'].hadoopJob.properties['key'] - - jobs['step-id'].hadoopJob.args[0] - - jobs['step-id'].hiveJob.scriptVariables['key'] - - jobs['step-id'].hadoopJob.mainJarFileUri - - placement.clusterSelector.zone - - It may not be possible to parameterize maps and repeated - fields in their entirety since only individual map values - and individual items in repeated fields can be referenced. 
- For example, the following field paths are invalid: - - - placement.clusterSelector.clusterLabels - - jobs['step-id'].sparkJob.args - description (str): - Optional. Brief description of the parameter. - Must not exceed 1024 characters. - validation (google.cloud.dataproc_v1.types.ParameterValidation): - Optional. Validation rules to be applied to - this parameter's value. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - fields = proto.RepeatedField( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - validation = proto.Field( - proto.MESSAGE, - number=4, - message='ParameterValidation', - ) - - -class ParameterValidation(proto.Message): - r"""Configuration for parameter validation. - Attributes: - regex (google.cloud.dataproc_v1.types.RegexValidation): - Validation based on regular expressions. - values (google.cloud.dataproc_v1.types.ValueValidation): - Validation based on a list of allowed values. - """ - - regex = proto.Field( - proto.MESSAGE, - number=1, - oneof='validation_type', - message='RegexValidation', - ) - values = proto.Field( - proto.MESSAGE, - number=2, - oneof='validation_type', - message='ValueValidation', - ) - - -class RegexValidation(proto.Message): - r"""Validation based on regular expressions. - Attributes: - regexes (Sequence[str]): - Required. RE2 regular expressions used to - validate the parameter's value. The value must - match the regex in its entirety (substring - matches are not sufficient). - """ - - regexes = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class ValueValidation(proto.Message): - r"""Validation based on a list of allowed values. - Attributes: - values (Sequence[str]): - Required. List of allowed values for the - parameter. - """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class WorkflowMetadata(proto.Message): - r"""A Dataproc workflow template resource. - Attributes: - template (str): - Output only. The resource name of the workflow template as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates``, the resource - name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): - Output only. The version of template at the - time of workflow instantiation. - create_cluster (google.cloud.dataproc_v1.types.ClusterOperation): - Output only. The create cluster operation - metadata. - graph (google.cloud.dataproc_v1.types.WorkflowGraph): - Output only. The workflow graph. - delete_cluster (google.cloud.dataproc_v1.types.ClusterOperation): - Output only. The delete cluster operation - metadata. - state (google.cloud.dataproc_v1.types.WorkflowMetadata.State): - Output only. The workflow state. - cluster_name (str): - Output only. The name of the target cluster. - parameters (Sequence[google.cloud.dataproc_v1.types.WorkflowMetadata.ParametersEntry]): - Map from parameter names to values that were - used for those parameters. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Workflow start time. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Workflow end time. - cluster_uuid (str): - Output only. The UUID of target cluster. 
- dag_timeout (google.protobuf.duration_pb2.Duration): - Output only. The timeout duration for the DAG of jobs, - expressed in seconds (see `JSON representation of - duration `__). - dag_start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. DAG start time, only set for workflows with - [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] - when DAG begins. - dag_end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. DAG end time, only set for workflows with - [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] - when DAG ends. - """ - class State(proto.Enum): - r"""The operation state.""" - UNKNOWN = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - - template = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.INT32, - number=2, - ) - create_cluster = proto.Field( - proto.MESSAGE, - number=3, - message='ClusterOperation', - ) - graph = proto.Field( - proto.MESSAGE, - number=4, - message='WorkflowGraph', - ) - delete_cluster = proto.Field( - proto.MESSAGE, - number=5, - message='ClusterOperation', - ) - state = proto.Field( - proto.ENUM, - number=6, - enum=State, - ) - cluster_name = proto.Field( - proto.STRING, - number=7, - ) - parameters = proto.MapField( - proto.STRING, - proto.STRING, - number=8, - ) - start_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - cluster_uuid = proto.Field( - proto.STRING, - number=11, - ) - dag_timeout = proto.Field( - proto.MESSAGE, - number=12, - message=duration_pb2.Duration, - ) - dag_start_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - dag_end_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - - -class ClusterOperation(proto.Message): - r"""The cluster operation triggered by a workflow. - Attributes: - operation_id (str): - Output only. The id of the cluster operation. - error (str): - Output only. Error, if operation failed. - done (bool): - Output only. Indicates the operation is done. - """ - - operation_id = proto.Field( - proto.STRING, - number=1, - ) - error = proto.Field( - proto.STRING, - number=2, - ) - done = proto.Field( - proto.BOOL, - number=3, - ) - - -class WorkflowGraph(proto.Message): - r"""The workflow graph. - Attributes: - nodes (Sequence[google.cloud.dataproc_v1.types.WorkflowNode]): - Output only. The workflow nodes. - """ - - nodes = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='WorkflowNode', - ) - - -class WorkflowNode(proto.Message): - r"""The workflow node. - Attributes: - step_id (str): - Output only. The name of the node. - prerequisite_step_ids (Sequence[str]): - Output only. Node's prerequisite nodes. - job_id (str): - Output only. The job id; populated after the - node enters RUNNING state. - state (google.cloud.dataproc_v1.types.WorkflowNode.NodeState): - Output only. The node state. - error (str): - Output only. The error detail. 
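# --- Editor's note: a hedged sketch tying together TemplateParameter, RegexValidation,
# and workflow instantiation as described in this module. The template name, parameter
# name/value, and project/region are placeholders, and the request_id follows the UUID
# recommendation documented for InstantiateWorkflowTemplateRequest below.
import uuid
from google.cloud import dataproc_v1

# A parameter that substitutes the cluster-selector zone, constrained by a regex.
# It would be appended to WorkflowTemplate.parameters before the template is saved.
zone_parameter = {
    "name": "ZONE",
    "fields": ["placement.clusterSelector.zone"],
    "description": "Zone used when matching an existing cluster.",
    "validation": {"regex": {"regexes": [r"us-central1-[abcf]"]}},
}

workflow_client = dataproc_v1.WorkflowTemplateServiceClient(
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
)
operation = workflow_client.instantiate_workflow_template(request={
    "name": "projects/my-project/regions/us-central1/workflowTemplates/example-workflow",
    "request_id": str(uuid.uuid4()),
    "parameters": {"ZONE": "us-central1-a"},
})
operation.result()  # Wait for the workflow DAG to finish.
# The operation metadata is a WorkflowMetadata message carrying the executed graph.
metadata = operation.metadata
for node in metadata.graph.nodes:
    print(node.step_id, node.state.name, node.error)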
- """ - class NodeState(proto.Enum): - r"""The workflow node state.""" - NODE_STATE_UNSPECIFIED = 0 - BLOCKED = 1 - RUNNABLE = 2 - RUNNING = 3 - COMPLETED = 4 - FAILED = 5 - - step_id = proto.Field( - proto.STRING, - number=1, - ) - prerequisite_step_ids = proto.RepeatedField( - proto.STRING, - number=2, - ) - job_id = proto.Field( - proto.STRING, - number=3, - ) - state = proto.Field( - proto.ENUM, - number=5, - enum=NodeState, - ) - error = proto.Field( - proto.STRING, - number=6, - ) - - -class CreateWorkflowTemplateRequest(proto.Message): - r"""A request to create a workflow template. - Attributes: - parent (str): - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,create``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.create``, the - resource name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template (google.cloud.dataproc_v1.types.WorkflowTemplate): - Required. The Dataproc workflow template to - create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - template = proto.Field( - proto.MESSAGE, - number=2, - message='WorkflowTemplate', - ) - - -class GetWorkflowTemplateRequest(proto.Message): - r"""A request to fetch a workflow template. - Attributes: - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.get``, the - resource name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.get``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): - Optional. The version of workflow template to - retrieve. Only previously instantiated versions - can be retrieved. - If unspecified, retrieves the current version. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.INT32, - number=2, - ) - - -class InstantiateWorkflowTemplateRequest(proto.Message): - r"""A request to instantiate a workflow template. - Attributes: - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): - Optional. The version of workflow template to - instantiate. If specified, the workflow will be - instantiated only if the current version of the - workflow template has the supplied version. - This option cannot be used to instantiate a - previous version of workflow template. - request_id (str): - Optional. A tag that prevents multiple concurrent workflow - instances with the same tag from running. This mitigates - risk of concurrent instances started due to retries. - - It is recommended to always set this value to a - `UUID `__. 
- - The tag must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - parameters (Sequence[google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): - Optional. Map from parameter names to values - that should be used for those parameters. Values - may not exceed 1000 characters. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.INT32, - number=2, - ) - request_id = proto.Field( - proto.STRING, - number=5, - ) - parameters = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - - -class InstantiateInlineWorkflowTemplateRequest(proto.Message): - r"""A request to instantiate an inline workflow template. - Attributes: - parent (str): - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For - ``projects.regions.workflowTemplates,instantiateinline``, - the resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For - ``projects.locations.workflowTemplates.instantiateinline``, - the resource name of the location has the following - format: ``projects/{project_id}/locations/{location}`` - template (google.cloud.dataproc_v1.types.WorkflowTemplate): - Required. The workflow template to - instantiate. - request_id (str): - Optional. A tag that prevents multiple concurrent workflow - instances with the same tag from running. This mitigates - risk of concurrent instances started due to retries. - - It is recommended to always set this value to a - `UUID `__. - - The tag must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - template = proto.Field( - proto.MESSAGE, - number=2, - message='WorkflowTemplate', - ) - request_id = proto.Field( - proto.STRING, - number=3, - ) - - -class UpdateWorkflowTemplateRequest(proto.Message): - r"""A request to update a workflow template. - Attributes: - template (google.cloud.dataproc_v1.types.WorkflowTemplate): - Required. The updated workflow template. - - The ``template.version`` field must match the current - version. - """ - - template = proto.Field( - proto.MESSAGE, - number=1, - message='WorkflowTemplate', - ) - - -class ListWorkflowTemplatesRequest(proto.Message): - r"""A request to list workflow templates in a project. - Attributes: - parent (str): - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,list``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.list``, the - resource name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - page_size (int): - Optional. The maximum number of results to - return in each response. - page_token (str): - Optional. The page token, returned by a - previous call, to request the next page of - results. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - - -class ListWorkflowTemplatesResponse(proto.Message): - r"""A response to a request to list workflow templates in a - project. 
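# --- Editor's note: a hedged sketch of paging through templates with the
# request/response pair described here; the returned pager handles next_page_token
# transparently. Parent and page size values are placeholders.
from google.cloud import dataproc_v1

workflow_client = dataproc_v1.WorkflowTemplateServiceClient(
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
)
for template in workflow_client.list_workflow_templates(
    request={"parent": "projects/my-project/regions/us-central1", "page_size": 50}
):
    print(template.name, template.version)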
- - Attributes: - templates (Sequence[google.cloud.dataproc_v1.types.WorkflowTemplate]): - Output only. WorkflowTemplates list. - next_page_token (str): - Output only. This token is included in the response if there - are more results to fetch. To fetch additional results, - provide this value as the page_token in a subsequent - ListWorkflowTemplatesRequest. - """ - - @property - def raw_page(self): - return self - - templates = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='WorkflowTemplate', - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteWorkflowTemplateRequest(proto.Message): - r"""A request to delete a workflow template. - Currently started workflows will remain running. - - Attributes: - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.delete``, the - resource name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): - Optional. The version of workflow template to - delete. If specified, will only delete the - template if the current server version matches - specified version. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/mypy.ini b/owl-bot-staging/v1/mypy.ini deleted file mode 100644 index 4505b485..00000000 --- a/owl-bot-staging/v1/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1/noxfile.py b/owl-bot-staging/v1/noxfile.py deleted file mode 100644 index 5d9be515..00000000 --- a/owl-bot-staging/v1/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/dataproc_v1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.7') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. - """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1/scripts/fixup_dataproc_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_dataproc_v1_keywords.py deleted file mode 100644 index a976d109..00000000 --- a/owl-bot-staging/v1/scripts/fixup_dataproc_v1_keywords.py +++ /dev/null @@ -1,202 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class dataprocCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'cancel_job': ('project_id', 'region', 'job_id', ), - 'create_autoscaling_policy': ('parent', 'policy', ), - 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', ), - 'create_workflow_template': ('parent', 'template', ), - 'delete_autoscaling_policy': ('name', ), - 'delete_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), - 'delete_job': ('project_id', 'region', 'job_id', ), - 'delete_workflow_template': ('name', 'version', ), - 'diagnose_cluster': ('project_id', 'region', 'cluster_name', ), - 'get_autoscaling_policy': ('name', ), - 'get_cluster': ('project_id', 'region', 'cluster_name', ), - 'get_job': ('project_id', 'region', 'job_id', ), - 'get_workflow_template': ('name', 'version', ), - 'instantiate_inline_workflow_template': ('parent', 'template', 'request_id', ), - 'instantiate_workflow_template': ('name', 'version', 'request_id', 'parameters', ), - 'list_autoscaling_policies': ('parent', 'page_size', 'page_token', ), - 'list_clusters': ('project_id', 'region', 'filter', 'page_size', 'page_token', ), - 'list_jobs': ('project_id', 'region', 'page_size', 'page_token', 'cluster_name', 'job_state_matcher', 'filter', ), - 'list_workflow_templates': ('parent', 'page_size', 'page_token', ), - 'start_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), - 'stop_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), - 'submit_job': ('project_id', 'region', 'job', 'request_id', ), - 'submit_job_as_operation': ('project_id', 'region', 'job', 'request_id', ), - 'update_autoscaling_policy': ('policy', ), - 'update_cluster': ('project_id', 'region', 'cluster_name', 'cluster', 'update_mask', 'graceful_decommission_timeout', 'request_id', ), - 'update_job': ('project_id', 'region', 'job_id', 'job', 'update_mask', ), - 'update_workflow_template': ('template', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=dataprocCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the dataproc client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/setup.py b/owl-bot-staging/v1/setup.py deleted file mode 100644 index 63f4596f..00000000 --- a/owl-bot-staging/v1/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-dataproc', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1/tests/__init__.py b/owl-bot-staging/v1/tests/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/__init__.py b/owl-bot-staging/v1/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py deleted file mode 100644 index 5e2c6c0d..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py +++ /dev/null @@ -1,2293 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import mock
-import packaging.version
-
-import grpc
-from grpc.experimental import aio
-import math
-import pytest
-from proto.marshal.rules.dates import DurationRule, TimestampRule
-
-
-from google.api_core import client_options
-from google.api_core import exceptions as core_exceptions
-from google.api_core import gapic_v1
-from google.api_core import grpc_helpers
-from google.api_core import grpc_helpers_async
-from google.auth import credentials as ga_credentials
-from google.auth.exceptions import MutualTLSChannelError
-from google.cloud.dataproc_v1.services.autoscaling_policy_service import AutoscalingPolicyServiceAsyncClient
-from google.cloud.dataproc_v1.services.autoscaling_policy_service import AutoscalingPolicyServiceClient
-from google.cloud.dataproc_v1.services.autoscaling_policy_service import pagers
-from google.cloud.dataproc_v1.services.autoscaling_policy_service import transports
-from google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.base import _GOOGLE_AUTH_VERSION
-from google.cloud.dataproc_v1.types import autoscaling_policies
-from google.oauth2 import service_account
-from google.protobuf import duration_pb2 # type: ignore
-import google.auth
-
-
-# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
-# through google-api-core:
-# - Delete the auth "less than" test cases
-# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
-    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
-    reason="This test requires google-auth < 1.25.0",
-)
-requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
-    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
-    reason="This test requires google-auth >= 1.25.0",
-)
-
-def client_cert_source_callback():
-    return b"cert bytes", b"key bytes"
-
-
-# If default endpoint is localhost, then default mtls endpoint will be the same.
-# This method modifies the default endpoint so the client can produce a different
-# mtls endpoint for endpoint testing purposes.
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(None) is None - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - AutoscalingPolicyServiceClient, - AutoscalingPolicyServiceAsyncClient, -]) -def test_autoscaling_policy_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - AutoscalingPolicyServiceClient, - AutoscalingPolicyServiceAsyncClient, -]) -def test_autoscaling_policy_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), - (transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_autoscaling_policy_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - AutoscalingPolicyServiceClient, - AutoscalingPolicyServiceAsyncClient, -]) -def test_autoscaling_policy_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def 
test_autoscaling_policy_service_client_get_transport_class(): - transport = AutoscalingPolicyServiceClient.get_transport_class() - available_transports = [ - transports.AutoscalingPolicyServiceGrpcTransport, - ] - assert transport in available_transports - - transport = AutoscalingPolicyServiceClient.get_transport_class("grpc") - assert transport == transports.AutoscalingPolicyServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), - (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(AutoscalingPolicyServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceClient)) -@mock.patch.object(AutoscalingPolicyServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceAsyncClient)) -def test_autoscaling_policy_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(AutoscalingPolicyServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(AutoscalingPolicyServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc", "true"), - (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc", "false"), - (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(AutoscalingPolicyServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceClient)) -@mock.patch.object(AutoscalingPolicyServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_autoscaling_policy_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), - (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_autoscaling_policy_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), - (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_autoscaling_policy_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_autoscaling_policy_service_client_client_options_from_dict(): - with mock.patch('google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = AutoscalingPolicyServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.CreateAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), - ) - response = client.create_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -def test_create_autoscaling_policy_from_dict(): - test_create_autoscaling_policy(request_type=dict) - - -def test_create_autoscaling_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - client.create_autoscaling_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() - - -@pytest.mark.asyncio -async def test_create_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.CreateAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - )) - response = await client.create_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -@pytest.mark.asyncio -async def test_create_autoscaling_policy_async_from_dict(): - await test_create_autoscaling_policy_async(request_type=dict) - - -def test_create_autoscaling_policy_field_headers(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.CreateAutoscalingPolicyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - call.return_value = autoscaling_policies.AutoscalingPolicy() - client.create_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_autoscaling_policy_field_headers_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.CreateAutoscalingPolicyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - await client.create_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_autoscaling_policy_flattened(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_autoscaling_policy( - parent='parent_value', - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') - - -def test_create_autoscaling_policy_flattened_error(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_autoscaling_policy( - autoscaling_policies.CreateAutoscalingPolicyRequest(), - parent='parent_value', - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_create_autoscaling_policy_flattened_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_autoscaling_policy( - parent='parent_value', - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') - - -@pytest.mark.asyncio -async def test_create_autoscaling_policy_flattened_error_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_autoscaling_policy( - autoscaling_policies.CreateAutoscalingPolicyRequest(), - parent='parent_value', - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - -def test_update_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.UpdateAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), - ) - response = client.update_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -def test_update_autoscaling_policy_from_dict(): - test_update_autoscaling_policy(request_type=dict) - - -def test_update_autoscaling_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - client.update_autoscaling_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() - - -@pytest.mark.asyncio -async def test_update_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.UpdateAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - )) - response = await client.update_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -@pytest.mark.asyncio -async def test_update_autoscaling_policy_async_from_dict(): - await test_update_autoscaling_policy_async(request_type=dict) - - -def test_update_autoscaling_policy_field_headers(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.UpdateAutoscalingPolicyRequest() - - request.policy.name = 'policy.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - call.return_value = autoscaling_policies.AutoscalingPolicy() - client.update_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'policy.name=policy.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_autoscaling_policy_field_headers_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.UpdateAutoscalingPolicyRequest() - - request.policy.name = 'policy.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - await client.update_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'policy.name=policy.name/value', - ) in kw['metadata'] - - -def test_update_autoscaling_policy_flattened(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_autoscaling_policy( - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') - - -def test_update_autoscaling_policy_flattened_error(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_autoscaling_policy( - autoscaling_policies.UpdateAutoscalingPolicyRequest(), - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_update_autoscaling_policy_flattened_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_autoscaling_policy( - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') - - -@pytest.mark.asyncio -async def test_update_autoscaling_policy_flattened_error_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_autoscaling_policy( - autoscaling_policies.UpdateAutoscalingPolicyRequest(), - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - -def test_get_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.GetAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), - ) - response = client.get_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -def test_get_autoscaling_policy_from_dict(): - test_get_autoscaling_policy(request_type=dict) - - -def test_get_autoscaling_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - client.get_autoscaling_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() - - -@pytest.mark.asyncio -async def test_get_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.GetAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - )) - response = await client.get_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_autoscaling_policy_async_from_dict(): - await test_get_autoscaling_policy_async(request_type=dict) - - -def test_get_autoscaling_policy_field_headers(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.GetAutoscalingPolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - call.return_value = autoscaling_policies.AutoscalingPolicy() - client.get_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_autoscaling_policy_field_headers_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.GetAutoscalingPolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - await client.get_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_autoscaling_policy_flattened(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_autoscaling_policy( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_autoscaling_policy_flattened_error(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_autoscaling_policy( - autoscaling_policies.GetAutoscalingPolicyRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_autoscaling_policy_flattened_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_autoscaling_policy( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_autoscaling_policy_flattened_error_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_autoscaling_policy( - autoscaling_policies.GetAutoscalingPolicyRequest(), - name='name_value', - ) - - -def test_list_autoscaling_policies(transport: str = 'grpc', request_type=autoscaling_policies.ListAutoscalingPoliciesRequest): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_autoscaling_policies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAutoscalingPoliciesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_autoscaling_policies_from_dict(): - test_list_autoscaling_policies(request_type=dict) - - -def test_list_autoscaling_policies_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - client.list_autoscaling_policies() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() - - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.ListAutoscalingPoliciesRequest): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_autoscaling_policies(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAutoscalingPoliciesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_async_from_dict(): - await test_list_autoscaling_policies_async(request_type=dict) - - -def test_list_autoscaling_policies_field_headers(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.ListAutoscalingPoliciesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() - client.list_autoscaling_policies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_field_headers_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.ListAutoscalingPoliciesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse()) - await client.list_autoscaling_policies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_autoscaling_policies_flattened(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_autoscaling_policies( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_autoscaling_policies_flattened_error(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_autoscaling_policies( - autoscaling_policies.ListAutoscalingPoliciesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_flattened_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_autoscaling_policies( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_flattened_error_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_autoscaling_policies( - autoscaling_policies.ListAutoscalingPoliciesRequest(), - parent='parent_value', - ) - - -def test_list_autoscaling_policies_pager(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='abc', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[], - next_page_token='def', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='ghi', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_autoscaling_policies(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, autoscaling_policies.AutoscalingPolicy) - for i in results) - -def test_list_autoscaling_policies_pages(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='abc', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[], - next_page_token='def', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='ghi', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - ), - RuntimeError, - ) - pages = list(client.list_autoscaling_policies(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_async_pager(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='abc', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[], - next_page_token='def', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='ghi', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_autoscaling_policies(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, autoscaling_policies.AutoscalingPolicy) - for i in responses) - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_async_pages(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='abc', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[], - next_page_token='def', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='ghi', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_autoscaling_policies(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.DeleteAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_delete_autoscaling_policy_from_dict(): - test_delete_autoscaling_policy(request_type=dict) - - -def test_delete_autoscaling_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - client.delete_autoscaling_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() - - -@pytest.mark.asyncio -async def test_delete_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.DeleteAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_autoscaling_policy_async_from_dict(): - await test_delete_autoscaling_policy_async(request_type=dict) - - -def test_delete_autoscaling_policy_field_headers(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.DeleteAutoscalingPolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - call.return_value = None - client.delete_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_autoscaling_policy_field_headers_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.DeleteAutoscalingPolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_autoscaling_policy_flattened(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_autoscaling_policy( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_autoscaling_policy_flattened_error(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_autoscaling_policy( - autoscaling_policies.DeleteAutoscalingPolicyRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_autoscaling_policy_flattened_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_autoscaling_policy( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_autoscaling_policy_flattened_error_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_autoscaling_policy( - autoscaling_policies.DeleteAutoscalingPolicyRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.AutoscalingPolicyServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. 
- transport = transports.AutoscalingPolicyServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoscalingPolicyServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.AutoscalingPolicyServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoscalingPolicyServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.AutoscalingPolicyServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = AutoscalingPolicyServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.AutoscalingPolicyServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.AutoscalingPolicyServiceGrpcTransport, - transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.AutoscalingPolicyServiceGrpcTransport, - ) - -def test_autoscaling_policy_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.AutoscalingPolicyServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_autoscaling_policy_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.AutoscalingPolicyServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_autoscaling_policy', - 'update_autoscaling_policy', - 'get_autoscaling_policy', - 'list_autoscaling_policies', - 'delete_autoscaling_policy', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - -@requires_google_auth_gte_1_25_0 -def test_autoscaling_policy_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.AutoscalingPolicyServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_autoscaling_policy_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.AutoscalingPolicyServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_autoscaling_policy_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.AutoscalingPolicyServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_autoscaling_policy_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - AutoscalingPolicyServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_autoscaling_policy_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - AutoscalingPolicyServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.AutoscalingPolicyServiceGrpcTransport, - transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_autoscaling_policy_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.AutoscalingPolicyServiceGrpcTransport, - transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_autoscaling_policy_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.AutoscalingPolicyServiceGrpcTransport, grpc_helpers), - (transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_autoscaling_policy_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "dataproc.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="dataproc.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) -def test_autoscaling_policy_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_autoscaling_policy_service_host_no_port(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), - ) - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_autoscaling_policy_service_host_with_port(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), - ) - assert client.transport._host == 'dataproc.googleapis.com:8000' - -def test_autoscaling_policy_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.AutoscalingPolicyServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_autoscaling_policy_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) -def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) -def test_autoscaling_policy_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_autoscaling_policy_path(): - project = "squid" - location = "clam" - autoscaling_policy = "whelk" - expected = "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format(project=project, location=location, autoscaling_policy=autoscaling_policy, ) - actual = AutoscalingPolicyServiceClient.autoscaling_policy_path(project, location, autoscaling_policy) - assert expected == actual - - -def test_parse_autoscaling_policy_path(): - expected = { - "project": "octopus", - "location": "oyster", - "autoscaling_policy": "nudibranch", - } - path = AutoscalingPolicyServiceClient.autoscaling_policy_path(**expected) - - # Check that the 
path construction is reversible. - actual = AutoscalingPolicyServiceClient.parse_autoscaling_policy_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = AutoscalingPolicyServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = AutoscalingPolicyServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = AutoscalingPolicyServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = AutoscalingPolicyServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = AutoscalingPolicyServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = AutoscalingPolicyServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = AutoscalingPolicyServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = AutoscalingPolicyServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = AutoscalingPolicyServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = AutoscalingPolicyServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = AutoscalingPolicyServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = AutoscalingPolicyServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = AutoscalingPolicyServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = AutoscalingPolicyServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
-    actual = AutoscalingPolicyServiceClient.parse_common_location_path(path)
-    assert expected == actual
-
-
-def test_client_withDEFAULT_CLIENT_INFO():
-    client_info = gapic_v1.client_info.ClientInfo()
-
-    with mock.patch.object(transports.AutoscalingPolicyServiceTransport, '_prep_wrapped_messages') as prep:
-        client = AutoscalingPolicyServiceClient(
-            credentials=ga_credentials.AnonymousCredentials(),
-            client_info=client_info,
-        )
-        prep.assert_called_once_with(client_info)
-
-    with mock.patch.object(transports.AutoscalingPolicyServiceTransport, '_prep_wrapped_messages') as prep:
-        transport_class = AutoscalingPolicyServiceClient.get_transport_class()
-        transport = transport_class(
-            credentials=ga_credentials.AnonymousCredentials(),
-            client_info=client_info,
-        )
-        prep.assert_called_once_with(client_info)
diff --git a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_cluster_controller.py b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_cluster_controller.py
deleted file mode 100644
index f0d8e2af..00000000
--- a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_cluster_controller.py
+++ /dev/null
@@ -1,2449 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import mock
-import packaging.version
-
-import grpc
-from grpc.experimental import aio
-import math
-import pytest
-from proto.marshal.rules.dates import DurationRule, TimestampRule
-
-
-from google.api_core import client_options
-from google.api_core import exceptions as core_exceptions
-from google.api_core import future
-from google.api_core import gapic_v1
-from google.api_core import grpc_helpers
-from google.api_core import grpc_helpers_async
-from google.api_core import operation_async  # type: ignore
-from google.api_core import operations_v1
-from google.auth import credentials as ga_credentials
-from google.auth.exceptions import MutualTLSChannelError
-from google.cloud.dataproc_v1.services.cluster_controller import ClusterControllerAsyncClient
-from google.cloud.dataproc_v1.services.cluster_controller import ClusterControllerClient
-from google.cloud.dataproc_v1.services.cluster_controller import pagers
-from google.cloud.dataproc_v1.services.cluster_controller import transports
-from google.cloud.dataproc_v1.services.cluster_controller.transports.base import _GOOGLE_AUTH_VERSION
-from google.cloud.dataproc_v1.types import clusters
-from google.cloud.dataproc_v1.types import operations
-from google.cloud.dataproc_v1.types import shared
-from google.longrunning import operations_pb2
-from google.oauth2 import service_account
-from google.protobuf import duration_pb2  # type: ignore
-from google.protobuf import field_mask_pb2  # type: ignore
-from google.protobuf import timestamp_pb2  # type: ignore
-import google.auth
-
-
-# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
-# through google-api-core:
-# - Delete the auth "less than" test cases
-# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
-    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
-    reason="This test requires google-auth < 1.25.0",
-)
-requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
-    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
-    reason="This test requires google-auth >= 1.25.0",
-)
-
-def client_cert_source_callback():
-    return b"cert bytes", b"key bytes"
-
-
-# If default endpoint is localhost, then default mtls endpoint will be the same.
-# This method modifies the default endpoint so the client can produce a different
-# mtls endpoint for endpoint testing purposes.
-def modify_default_endpoint(client):
-    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
-
-
-def test__get_default_mtls_endpoint():
-    api_endpoint = "example.googleapis.com"
-    api_mtls_endpoint = "example.mtls.googleapis.com"
-    sandbox_endpoint = "example.sandbox.googleapis.com"
-    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
-    non_googleapi = "api.example.com"
-
-    assert ClusterControllerClient._get_default_mtls_endpoint(None) is None
-    assert ClusterControllerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
-    assert ClusterControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
-    assert ClusterControllerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
-    assert ClusterControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
-    assert ClusterControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
-
-
-@pytest.mark.parametrize("client_class", [
-    ClusterControllerClient,
-    ClusterControllerAsyncClient,
-])
-def test_cluster_controller_client_from_service_account_info(client_class):
-    creds = ga_credentials.AnonymousCredentials()
-    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
-        factory.return_value = creds
-        info = {"valid": True}
-        client = client_class.from_service_account_info(info)
-        assert client.transport._credentials == creds
-        assert isinstance(client, client_class)
-
-        assert client.transport._host == 'dataproc.googleapis.com:443'
-
-
-@pytest.mark.parametrize("client_class", [
-    ClusterControllerClient,
-    ClusterControllerAsyncClient,
-])
-def test_cluster_controller_client_service_account_always_use_jwt(client_class):
-    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
-        creds = service_account.Credentials(None, None, None)
-        client = client_class(credentials=creds)
-        use_jwt.assert_not_called()
-
-
-@pytest.mark.parametrize("transport_class,transport_name", [
-    (transports.ClusterControllerGrpcTransport, "grpc"),
-    (transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-def test_cluster_controller_client_service_account_always_use_jwt_true(transport_class, transport_name):
-    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
-        creds = service_account.Credentials(None, None, None)
-        transport = transport_class(credentials=creds, always_use_jwt_access=True)
-        use_jwt.assert_called_once_with(True)
-
-
-@pytest.mark.parametrize("client_class", [
-    ClusterControllerClient,
-    ClusterControllerAsyncClient,
-])
-def test_cluster_controller_client_from_service_account_file(client_class):
-    creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_cluster_controller_client_get_transport_class(): - transport = ClusterControllerClient.get_transport_class() - available_transports = [ - transports.ClusterControllerGrpcTransport, - ] - assert transport in available_transports - - transport = ClusterControllerClient.get_transport_class("grpc") - assert transport == transports.ClusterControllerGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), - (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(ClusterControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerClient)) -@mock.patch.object(ClusterControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerAsyncClient)) -def test_cluster_controller_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(ClusterControllerClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(ClusterControllerClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc", "true"), - (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc", "false"), - (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(ClusterControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerClient)) -@mock.patch.object(ClusterControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_cluster_controller_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), - (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_cluster_controller_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), - (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_cluster_controller_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_cluster_controller_client_client_options_from_dict(): - with mock.patch('google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = ClusterControllerClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_cluster(transport: str = 'grpc', request_type=clusters.CreateClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.CreateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_cluster_from_dict(): - test_create_cluster(request_type=dict) - - -def test_create_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - client.create_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.CreateClusterRequest() - - -@pytest.mark.asyncio -async def test_create_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.CreateClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.CreateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_cluster_async_from_dict(): - await test_create_cluster_async(request_type=dict) - - -def test_create_cluster_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_cluster( - project_id='project_id_value', - region='region_value', - cluster=clusters.Cluster(project_id='project_id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster == clusters.Cluster(project_id='project_id_value') - - -def test_create_cluster_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_cluster( - clusters.CreateClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster=clusters.Cluster(project_id='project_id_value'), - ) - - -@pytest.mark.asyncio -async def test_create_cluster_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_cluster( - project_id='project_id_value', - region='region_value', - cluster=clusters.Cluster(project_id='project_id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster == clusters.Cluster(project_id='project_id_value') - - -@pytest.mark.asyncio -async def test_create_cluster_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_cluster( - clusters.CreateClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster=clusters.Cluster(project_id='project_id_value'), - ) - - -def test_update_cluster(transport: str = 'grpc', request_type=clusters.UpdateClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.UpdateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_cluster_from_dict(): - test_update_cluster(request_type=dict) - - -def test_update_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - client.update_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.UpdateClusterRequest() - - -@pytest.mark.asyncio -async def test_update_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.UpdateClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.UpdateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_cluster_async_from_dict(): - await test_update_cluster_async(request_type=dict) - - -def test_update_cluster_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - cluster=clusters.Cluster(project_id='project_id_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - assert args[0].cluster == clusters.Cluster(project_id='project_id_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_cluster_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_cluster( - clusters.UpdateClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - cluster=clusters.Cluster(project_id='project_id_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_cluster_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.update_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - cluster=clusters.Cluster(project_id='project_id_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - assert args[0].cluster == clusters.Cluster(project_id='project_id_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -@pytest.mark.asyncio -async def test_update_cluster_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_cluster( - clusters.UpdateClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - cluster=clusters.Cluster(project_id='project_id_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_stop_cluster(transport: str = 'grpc', request_type=clusters.StopClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.stop_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.StopClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_stop_cluster_from_dict(): - test_stop_cluster(request_type=dict) - - -def test_stop_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_cluster), - '__call__') as call: - client.stop_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.StopClusterRequest() - - -@pytest.mark.asyncio -async def test_stop_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.StopClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.stop_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.stop_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.StopClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_stop_cluster_async_from_dict(): - await test_stop_cluster_async(request_type=dict) - - -def test_start_cluster(transport: str = 'grpc', request_type=clusters.StartClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.start_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.start_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.StartClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_start_cluster_from_dict(): - test_start_cluster(request_type=dict) - - -def test_start_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.start_cluster), - '__call__') as call: - client.start_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.StartClusterRequest() - - -@pytest.mark.asyncio -async def test_start_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.StartClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.start_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.start_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.StartClusterRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_start_cluster_async_from_dict(): - await test_start_cluster_async(request_type=dict) - - -def test_delete_cluster(transport: str = 'grpc', request_type=clusters.DeleteClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DeleteClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_cluster_from_dict(): - test_delete_cluster(request_type=dict) - - -def test_delete_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - client.delete_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DeleteClusterRequest() - - -@pytest.mark.asyncio -async def test_delete_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.DeleteClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DeleteClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_cluster_async_from_dict(): - await test_delete_cluster_async(request_type=dict) - - -def test_delete_cluster_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -def test_delete_cluster_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_cluster( - clusters.DeleteClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_cluster_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -@pytest.mark.asyncio -async def test_delete_cluster_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_cluster( - clusters.DeleteClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -def test_get_cluster(transport: str = 'grpc', request_type=clusters.GetClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = clusters.Cluster( - project_id='project_id_value', - cluster_name='cluster_name_value', - cluster_uuid='cluster_uuid_value', - ) - response = client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.GetClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, clusters.Cluster) - assert response.project_id == 'project_id_value' - assert response.cluster_name == 'cluster_name_value' - assert response.cluster_uuid == 'cluster_uuid_value' - - -def test_get_cluster_from_dict(): - test_get_cluster(request_type=dict) - - -def test_get_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - client.get_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.GetClusterRequest() - - -@pytest.mark.asyncio -async def test_get_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.GetClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(clusters.Cluster( - project_id='project_id_value', - cluster_name='cluster_name_value', - cluster_uuid='cluster_uuid_value', - )) - response = await client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.GetClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, clusters.Cluster) - assert response.project_id == 'project_id_value' - assert response.cluster_name == 'cluster_name_value' - assert response.cluster_uuid == 'cluster_uuid_value' - - -@pytest.mark.asyncio -async def test_get_cluster_async_from_dict(): - await test_get_cluster_async(request_type=dict) - - -def test_get_cluster_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = clusters.Cluster() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -def test_get_cluster_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_cluster( - clusters.GetClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -@pytest.mark.asyncio -async def test_get_cluster_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = clusters.Cluster() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.Cluster()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -@pytest.mark.asyncio -async def test_get_cluster_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_cluster( - clusters.GetClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -def test_list_clusters(transport: str = 'grpc', request_type=clusters.ListClustersRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = clusters.ListClustersResponse( - next_page_token='next_page_token_value', - ) - response = client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.ListClustersRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListClustersPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_clusters_from_dict(): - test_list_clusters(request_type=dict) - - -def test_list_clusters_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - client.list_clusters() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.ListClustersRequest() - - -@pytest.mark.asyncio -async def test_list_clusters_async(transport: str = 'grpc_asyncio', request_type=clusters.ListClustersRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(clusters.ListClustersResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.ListClustersRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListClustersAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_clusters_async_from_dict(): - await test_list_clusters_async(request_type=dict) - - -def test_list_clusters_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = clusters.ListClustersResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_clusters( - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].filter == 'filter_value' - - -def test_list_clusters_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_clusters( - clusters.ListClustersRequest(), - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - -@pytest.mark.asyncio -async def test_list_clusters_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = clusters.ListClustersResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.ListClustersResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_clusters( - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].filter == 'filter_value' - - -@pytest.mark.asyncio -async def test_list_clusters_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_clusters( - clusters.ListClustersRequest(), - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - -def test_list_clusters_pager(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - clusters.Cluster(), - ], - next_page_token='abc', - ), - clusters.ListClustersResponse( - clusters=[], - next_page_token='def', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - ], - next_page_token='ghi', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - ], - ), - RuntimeError, - ) - - metadata = () - pager = client.list_clusters(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, clusters.Cluster) - for i in results) - -def test_list_clusters_pages(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - clusters.Cluster(), - ], - next_page_token='abc', - ), - clusters.ListClustersResponse( - clusters=[], - next_page_token='def', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - ], - next_page_token='ghi', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - ], - ), - RuntimeError, - ) - pages = list(client.list_clusters(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_clusters_async_pager(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - clusters.Cluster(), - ], - next_page_token='abc', - ), - clusters.ListClustersResponse( - clusters=[], - next_page_token='def', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - ], - next_page_token='ghi', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_clusters(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, clusters.Cluster) - for i in responses) - -@pytest.mark.asyncio -async def test_list_clusters_async_pages(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - clusters.Cluster(), - ], - next_page_token='abc', - ), - clusters.ListClustersResponse( - clusters=[], - next_page_token='def', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - ], - next_page_token='ghi', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_clusters(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_diagnose_cluster(transport: str = 'grpc', request_type=clusters.DiagnoseClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.diagnose_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.diagnose_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DiagnoseClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_diagnose_cluster_from_dict(): - test_diagnose_cluster(request_type=dict) - - -def test_diagnose_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.diagnose_cluster), - '__call__') as call: - client.diagnose_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DiagnoseClusterRequest() - - -@pytest.mark.asyncio -async def test_diagnose_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.DiagnoseClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.diagnose_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.diagnose_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DiagnoseClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_diagnose_cluster_async_from_dict(): - await test_diagnose_cluster_async(request_type=dict) - - -def test_diagnose_cluster_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.diagnose_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.diagnose_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -def test_diagnose_cluster_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.diagnose_cluster( - clusters.DiagnoseClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -@pytest.mark.asyncio -async def test_diagnose_cluster_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.diagnose_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.diagnose_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -@pytest.mark.asyncio -async def test_diagnose_cluster_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.diagnose_cluster( - clusters.DiagnoseClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.ClusterControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.ClusterControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ClusterControllerClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.ClusterControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ClusterControllerClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.ClusterControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = ClusterControllerClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.ClusterControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.ClusterControllerGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.ClusterControllerGrpcTransport, - transports.ClusterControllerGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.ClusterControllerGrpcTransport, - ) - -def test_cluster_controller_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.ClusterControllerTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_cluster_controller_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.ClusterControllerTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_cluster', - 'update_cluster', - 'stop_cluster', - 'start_cluster', - 'delete_cluster', - 'get_cluster', - 'list_clusters', - 'diagnose_cluster', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_cluster_controller_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ClusterControllerTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_cluster_controller_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ClusterControllerTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_cluster_controller_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ClusterControllerTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_cluster_controller_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ClusterControllerClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_cluster_controller_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ClusterControllerClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ClusterControllerGrpcTransport, - transports.ClusterControllerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_cluster_controller_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ClusterControllerGrpcTransport, - transports.ClusterControllerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_cluster_controller_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.ClusterControllerGrpcTransport, grpc_helpers), - (transports.ClusterControllerGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_cluster_controller_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "dataproc.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="dataproc.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport]) -def test_cluster_controller_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_cluster_controller_host_no_port(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), - ) - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_cluster_controller_host_with_port(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), - ) - assert client.transport._host == 'dataproc.googleapis.com:8000' - -def test_cluster_controller_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.ClusterControllerGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_cluster_controller_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.ClusterControllerGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport]) -def test_cluster_controller_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport]) -def test_cluster_controller_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_cluster_controller_grpc_lro_client(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_cluster_controller_grpc_lro_async_client(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_cluster_path(): - project = "squid" - location = "clam" - cluster = "whelk" - expected = "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) - actual = ClusterControllerClient.cluster_path(project, location, cluster) - assert expected == actual - - -def test_parse_cluster_path(): - expected = { - "project": "octopus", - "location": "oyster", - "cluster": "nudibranch", - } - path = ClusterControllerClient.cluster_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterControllerClient.parse_cluster_path(path) - assert expected == actual - -def test_service_path(): - project = "cuttlefish" - location = "mussel" - service = "winkle" - expected = "projects/{project}/locations/{location}/services/{service}".format(project=project, location=location, service=service, ) - actual = ClusterControllerClient.service_path(project, location, service) - assert expected == actual - - -def test_parse_service_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "service": "abalone", - } - path = ClusterControllerClient.service_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterControllerClient.parse_service_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = ClusterControllerClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = ClusterControllerClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterControllerClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = ClusterControllerClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = ClusterControllerClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = ClusterControllerClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = ClusterControllerClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = ClusterControllerClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterControllerClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = ClusterControllerClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = ClusterControllerClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterControllerClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = ClusterControllerClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = ClusterControllerClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterControllerClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.ClusterControllerTransport, '_prep_wrapped_messages') as prep: - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.ClusterControllerTransport, '_prep_wrapped_messages') as prep: - transport_class = ClusterControllerClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_job_controller.py b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_job_controller.py deleted file mode 100644 index 245fde1e..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_job_controller.py +++ /dev/null @@ -1,2355 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.dataproc_v1.services.job_controller import JobControllerAsyncClient -from google.cloud.dataproc_v1.services.job_controller import JobControllerClient -from google.cloud.dataproc_v1.services.job_controller import pagers -from google.cloud.dataproc_v1.services.job_controller import transports -from google.cloud.dataproc_v1.services.job_controller.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.dataproc_v1.types import jobs -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert JobControllerClient._get_default_mtls_endpoint(None) is None - assert JobControllerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert JobControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert JobControllerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert JobControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert JobControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - JobControllerClient, - JobControllerAsyncClient, -]) -def test_job_controller_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - JobControllerClient, - JobControllerAsyncClient, -]) -def test_job_controller_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.JobControllerGrpcTransport, "grpc"), - (transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_controller_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - JobControllerClient, - JobControllerAsyncClient, -]) -def test_job_controller_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_job_controller_client_get_transport_class(): - transport = JobControllerClient.get_transport_class() - available_transports = [ - transports.JobControllerGrpcTransport, - ] - assert transport in available_transports - - transport = 
JobControllerClient.get_transport_class("grpc") - assert transport == transports.JobControllerGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), - (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(JobControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerClient)) -@mock.patch.object(JobControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerAsyncClient)) -def test_job_controller_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(JobControllerClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(JobControllerClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (JobControllerClient, transports.JobControllerGrpcTransport, "grpc", "true"), - (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (JobControllerClient, transports.JobControllerGrpcTransport, "grpc", "false"), - (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(JobControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerClient)) -@mock.patch.object(JobControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_job_controller_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
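# These environment-variable cases drive endpoint selection: "never" keeps the
# default endpoint, "always" switches to DEFAULT_MTLS_ENDPOINT, "auto" switches
# only when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a client
# certificate is available, and any other value raises MutualTLSChannelError.
# A minimal sketch of opting in from application code (placeholder values; the
# variables must be set before the client is constructed):
import os

os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"
os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "auto"
# A JobControllerClient built after this point would target
# dataproc.mtls.googleapis.com, assuming a default client certificate source
# is available on the machine.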
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), - (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_controller_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), - (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_controller_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_job_controller_client_client_options_from_dict(): - with mock.patch('google.cloud.dataproc_v1.services.job_controller.transports.JobControllerGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = JobControllerClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_submit_job(transport: str = 'grpc', request_type=jobs.SubmitJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job( - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), - ) - response = client.submit_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -def test_submit_job_from_dict(): - test_submit_job(request_type=dict) - - -def test_submit_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job), - '__call__') as call: - client.submit_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - -@pytest.mark.asyncio -async def test_submit_job_async(transport: str = 'grpc_asyncio', request_type=jobs.SubmitJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - )) - response = await client.submit_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -@pytest.mark.asyncio -async def test_submit_job_async_from_dict(): - await test_submit_job_async(request_type=dict) - - -def test_submit_job_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.submit_job( - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) - - -def test_submit_job_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.submit_job( - jobs.SubmitJobRequest(), - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - -@pytest.mark.asyncio -async def test_submit_job_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.submit_job( - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
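# The flattened-call tests above mirror the usual application pattern, where
# the SubmitJobRequest is assembled from keyword arguments. A minimal sketch,
# assuming a real project, region, an existing cluster and a PySpark script in
# GCS (all names below are placeholders):
from google.cloud import dataproc_v1

client = dataproc_v1.JobControllerClient()
job = dataproc_v1.Job(
    placement=dataproc_v1.JobPlacement(cluster_name="my-cluster"),
    pyspark_job=dataproc_v1.PySparkJob(main_python_file_uri="gs://my-bucket/job.py"),
)
submitted = client.submit_job(project_id="my-project", region="us-central1", job=job)
print(submitted.job_uuid)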
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) - - -@pytest.mark.asyncio -async def test_submit_job_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.submit_job( - jobs.SubmitJobRequest(), - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - -def test_submit_job_as_operation(transport: str = 'grpc', request_type=jobs.SubmitJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job_as_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.submit_job_as_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_submit_job_as_operation_from_dict(): - test_submit_job_as_operation(request_type=dict) - - -def test_submit_job_as_operation_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job_as_operation), - '__call__') as call: - client.submit_job_as_operation() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - -@pytest.mark.asyncio -async def test_submit_job_as_operation_async(transport: str = 'grpc_asyncio', request_type=jobs.SubmitJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job_as_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.submit_job_as_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_submit_job_as_operation_async_from_dict(): - await test_submit_job_as_operation_async(request_type=dict) - - -def test_submit_job_as_operation_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job_as_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.submit_job_as_operation( - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) - - -def test_submit_job_as_operation_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.submit_job_as_operation( - jobs.SubmitJobRequest(), - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - -@pytest.mark.asyncio -async def test_submit_job_as_operation_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job_as_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.submit_job_as_operation( - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) - - -@pytest.mark.asyncio -async def test_submit_job_as_operation_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
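# submit_job_as_operation exposes the same RPC as a long-running operation; the
# returned future resolves to the finished Job. A hedged sketch, reusing the
# placeholder project, region and job shape from the submit_job example above:
from google.cloud import dataproc_v1

client = dataproc_v1.JobControllerClient()
job = dataproc_v1.Job(
    placement=dataproc_v1.JobPlacement(cluster_name="my-cluster"),
    pyspark_job=dataproc_v1.PySparkJob(main_python_file_uri="gs://my-bucket/job.py"),
)
operation = client.submit_job_as_operation(
    project_id="my-project", region="us-central1", job=job)
finished = operation.result()  # blocks until the job reaches a terminal state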
- with pytest.raises(ValueError): - await client.submit_job_as_operation( - jobs.SubmitJobRequest(), - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - -def test_get_job(transport: str = 'grpc', request_type=jobs.GetJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job( - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), - ) - response = client.get_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.GetJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -def test_get_job_from_dict(): - test_get_job(request_type=dict) - - -def test_get_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_job), - '__call__') as call: - client.get_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.GetJobRequest() - - -@pytest.mark.asyncio -async def test_get_job_async(transport: str = 'grpc_asyncio', request_type=jobs.GetJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - )) - response = await client.get_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.GetJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, jobs.Job) - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -@pytest.mark.asyncio -async def test_get_job_async_from_dict(): - await test_get_job_async(request_type=dict) - - -def test_get_job_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -def test_get_job_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_job( - jobs.GetJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -@pytest.mark.asyncio -async def test_get_job_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -@pytest.mark.asyncio -async def test_get_job_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_job( - jobs.GetJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -def test_list_jobs(transport: str = 'grpc', request_type=jobs.ListJobsRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
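# get_job takes the same flattened triple used throughout these tests and
# returns the jobs.Job resource (the async client mirrors it with awaitable
# calls). A minimal sketch with placeholder identifiers:
from google.cloud import dataproc_v1

client = dataproc_v1.JobControllerClient()
job = client.get_job(project_id="my-project", region="us-central1", job_id="my-job-id")
print(job.done, job.driver_output_resource_uri)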
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.ListJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.ListJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_jobs_from_dict(): - test_list_jobs(request_type=dict) - - -def test_list_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - client.list_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.ListJobsRequest() - - -@pytest.mark.asyncio -async def test_list_jobs_async(transport: str = 'grpc_asyncio', request_type=jobs.ListJobsRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.ListJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.ListJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_jobs_async_from_dict(): - await test_list_jobs_async(request_type=dict) - - -def test_list_jobs_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.ListJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_jobs( - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].filter == 'filter_value' - - -def test_list_jobs_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_jobs( - jobs.ListJobsRequest(), - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - -@pytest.mark.asyncio -async def test_list_jobs_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.ListJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.ListJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_jobs( - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].filter == 'filter_value' - - -@pytest.mark.asyncio -async def test_list_jobs_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_jobs( - jobs.ListJobsRequest(), - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - -def test_list_jobs_pager(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - jobs.Job(), - ], - next_page_token='abc', - ), - jobs.ListJobsResponse( - jobs=[], - next_page_token='def', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - ], - next_page_token='ghi', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - ], - ), - RuntimeError, - ) - - metadata = () - pager = client.list_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, jobs.Job) - for i in results) - -def test_list_jobs_pages(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - jobs.Job(), - ], - next_page_token='abc', - ), - jobs.ListJobsResponse( - jobs=[], - next_page_token='def', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - ], - next_page_token='ghi', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - ], - ), - RuntimeError, - ) - pages = list(client.list_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_jobs_async_pager(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - jobs.Job(), - ], - next_page_token='abc', - ), - jobs.ListJobsResponse( - jobs=[], - next_page_token='def', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - ], - next_page_token='ghi', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, jobs.Job) - for i in responses) - -@pytest.mark.asyncio -async def test_list_jobs_async_pages(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - jobs.Job(), - ], - next_page_token='abc', - ), - jobs.ListJobsResponse( - jobs=[], - next_page_token='def', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - ], - next_page_token='ghi', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_job(transport: str = 'grpc', request_type=jobs.UpdateJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job( - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), - ) - response = client.update_job(request) - - # Establish that the underlying gRPC stub method was called. 
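# The pager tests above check that list_jobs returns a ListJobsPager (or
# ListJobsAsyncPager) that fetches follow-up pages transparently. A minimal
# sketch of the usual iteration pattern, with placeholder identifiers:
from google.cloud import dataproc_v1

client = dataproc_v1.JobControllerClient()
for job in client.list_jobs(project_id="my-project", region="us-central1"):
    # Each item is a jobs.Job; additional pages are requested on demand.
    print(job.job_uuid)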
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.UpdateJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -def test_update_job_from_dict(): - test_update_job(request_type=dict) - - -def test_update_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_job), - '__call__') as call: - client.update_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.UpdateJobRequest() - - -@pytest.mark.asyncio -async def test_update_job_async(transport: str = 'grpc_asyncio', request_type=jobs.UpdateJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - )) - response = await client.update_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.UpdateJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -@pytest.mark.asyncio -async def test_update_job_async_from_dict(): - await test_update_job_async(request_type=dict) - - -def test_cancel_job(transport: str = 'grpc', request_type=jobs.CancelJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = jobs.Job( - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), - ) - response = client.cancel_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.CancelJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -def test_cancel_job_from_dict(): - test_cancel_job(request_type=dict) - - -def test_cancel_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_job), - '__call__') as call: - client.cancel_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.CancelJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_job_async(transport: str = 'grpc_asyncio', request_type=jobs.CancelJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - )) - response = await client.cancel_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.CancelJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -@pytest.mark.asyncio -async def test_cancel_job_async_from_dict(): - await test_cancel_job_async(request_type=dict) - - -def test_cancel_job_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.cancel_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -def test_cancel_job_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_job( - jobs.CancelJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_job_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -@pytest.mark.asyncio -async def test_cancel_job_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_job( - jobs.CancelJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -def test_delete_job(transport: str = 'grpc', request_type=jobs.DeleteJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.DeleteJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_job_from_dict(): - test_delete_job(request_type=dict) - - -def test_delete_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
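# cancel_job follows the same flattened signature as get_job (project_id,
# region, job_id) and returns the updated Job; delete_job returns None on
# success, as the surrounding tests assert. A minimal sketch with placeholder
# identifiers:
from google.cloud import dataproc_v1

client = dataproc_v1.JobControllerClient()
cancelled = client.cancel_job(
    project_id="my-project", region="us-central1", job_id="my-job-id")
client.delete_job(project_id="my-project", region="us-central1", job_id="my-job-id")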
- client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_job), - '__call__') as call: - client.delete_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.DeleteJobRequest() - - -@pytest.mark.asyncio -async def test_delete_job_async(transport: str = 'grpc_asyncio', request_type=jobs.DeleteJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.DeleteJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_job_async_from_dict(): - await test_delete_job_async(request_type=dict) - - -def test_delete_job_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -def test_delete_job_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_job( - jobs.DeleteJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -@pytest.mark.asyncio -async def test_delete_job_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.delete_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -@pytest.mark.asyncio -async def test_delete_job_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_job( - jobs.DeleteJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.JobControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.JobControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobControllerClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.JobControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobControllerClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = JobControllerClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.JobControllerGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.JobControllerGrpcTransport, - transports.JobControllerGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
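# As the transport tests above show, a preconstructed transport can be passed
# instead of credentials or client_options (supplying both raises ValueError).
# A hedged sketch, assuming application default credentials are available:
from google.cloud import dataproc_v1
from google.cloud.dataproc_v1.services.job_controller import transports

transport = transports.JobControllerGrpcTransport()  # no credentials given, so ADC is used
client = dataproc_v1.JobControllerClient(transport=transport)
assert client.transport is transport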
- client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.JobControllerGrpcTransport, - ) - -def test_job_controller_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.JobControllerTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_job_controller_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.dataproc_v1.services.job_controller.transports.JobControllerTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.JobControllerTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'submit_job', - 'submit_job_as_operation', - 'get_job', - 'list_jobs', - 'update_job', - 'cancel_job', - 'delete_job', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_job_controller_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobControllerTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_job_controller_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobControllerTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_job_controller_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobControllerTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_job_controller_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - JobControllerClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_job_controller_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - JobControllerClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.JobControllerGrpcTransport, - transports.JobControllerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_job_controller_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.JobControllerGrpcTransport, - transports.JobControllerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_job_controller_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.JobControllerGrpcTransport, grpc_helpers), - (transports.JobControllerGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_job_controller_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "dataproc.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="dataproc.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport]) -def test_job_controller_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_job_controller_host_no_port(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), - ) - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_job_controller_host_with_port(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), - ) - assert client.transport._host == 'dataproc.googleapis.com:8000' - -def test_job_controller_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.JobControllerGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_job_controller_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.JobControllerGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport]) -def test_job_controller_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport]) -def test_job_controller_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_job_controller_grpc_lro_client(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_job_controller_grpc_lro_async_client(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = JobControllerClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = JobControllerClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = JobControllerClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = JobControllerClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = JobControllerClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = JobControllerClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = JobControllerClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = JobControllerClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = JobControllerClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = JobControllerClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = JobControllerClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = JobControllerClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = JobControllerClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = JobControllerClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobControllerClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.JobControllerTransport, '_prep_wrapped_messages') as prep: - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.JobControllerTransport, '_prep_wrapped_messages') as prep: - transport_class = JobControllerClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py b/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py deleted file mode 100644 index cb8075d0..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py +++ /dev/null @@ -1,2863 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.dataproc_v1.services.workflow_template_service import WorkflowTemplateServiceAsyncClient -from google.cloud.dataproc_v1.services.workflow_template_service import WorkflowTemplateServiceClient -from google.cloud.dataproc_v1.services.workflow_template_service import pagers -from google.cloud.dataproc_v1.services.workflow_template_service import transports -from google.cloud.dataproc_v1.services.workflow_template_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.dataproc_v1.types import clusters -from google.cloud.dataproc_v1.types import jobs -from google.cloud.dataproc_v1.types import shared -from google.cloud.dataproc_v1.types import workflow_templates -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" 
tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(None) is None - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - WorkflowTemplateServiceClient, - WorkflowTemplateServiceAsyncClient, -]) -def test_workflow_template_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - WorkflowTemplateServiceClient, - WorkflowTemplateServiceAsyncClient, -]) -def test_workflow_template_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.WorkflowTemplateServiceGrpcTransport, "grpc"), - (transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_workflow_template_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - WorkflowTemplateServiceClient, - WorkflowTemplateServiceAsyncClient, -]) -def 
test_workflow_template_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_workflow_template_service_client_get_transport_class(): - transport = WorkflowTemplateServiceClient.get_transport_class() - available_transports = [ - transports.WorkflowTemplateServiceGrpcTransport, - ] - assert transport in available_transports - - transport = WorkflowTemplateServiceClient.get_transport_class("grpc") - assert transport == transports.WorkflowTemplateServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), - (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(WorkflowTemplateServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceClient)) -@mock.patch.object(WorkflowTemplateServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceAsyncClient)) -def test_workflow_template_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(WorkflowTemplateServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(WorkflowTemplateServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc", "true"), - (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc", "false"), - (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(WorkflowTemplateServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceClient)) -@mock.patch.object(WorkflowTemplateServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_workflow_template_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), - (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_workflow_template_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), - (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_workflow_template_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_workflow_template_service_client_client_options_from_dict(): - with mock.patch('google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = WorkflowTemplateServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_workflow_template(transport: str = 'grpc', request_type=workflow_templates.CreateWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - ) - response = client.create_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -def test_create_workflow_template_from_dict(): - test_create_workflow_template(request_type=dict) - - -def test_create_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - client.create_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_create_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.CreateWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - )) - response = await client.create_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -@pytest.mark.asyncio -async def test_create_workflow_template_async_from_dict(): - await test_create_workflow_template_async(request_type=dict) - - -def test_create_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.CreateWorkflowTemplateRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - call.return_value = workflow_templates.WorkflowTemplate() - client.create_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.CreateWorkflowTemplateRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - await client.create_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_workflow_template( - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -def test_create_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_workflow_template( - workflow_templates.CreateWorkflowTemplateRequest(), - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_create_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_workflow_template( - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -@pytest.mark.asyncio -async def test_create_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_workflow_template( - workflow_templates.CreateWorkflowTemplateRequest(), - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -def test_get_workflow_template(transport: str = 'grpc', request_type=workflow_templates.GetWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - ) - response = client.get_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.GetWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -def test_get_workflow_template_from_dict(): - test_get_workflow_template(request_type=dict) - - -def test_get_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - client.get_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.GetWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_get_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.GetWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - )) - response = await client.get_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.GetWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -@pytest.mark.asyncio -async def test_get_workflow_template_async_from_dict(): - await test_get_workflow_template_async(request_type=dict) - - -def test_get_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.GetWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - call.return_value = workflow_templates.WorkflowTemplate() - client.get_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.GetWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - await client.get_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_workflow_template( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_workflow_template( - workflow_templates.GetWorkflowTemplateRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_workflow_template( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_workflow_template( - workflow_templates.GetWorkflowTemplateRequest(), - name='name_value', - ) - - -def test_instantiate_workflow_template(transport: str = 'grpc', request_type=workflow_templates.InstantiateWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.instantiate_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_instantiate_workflow_template_from_dict(): - test_instantiate_workflow_template(request_type=dict) - - -def test_instantiate_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - client.instantiate_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_instantiate_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.InstantiateWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.instantiate_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_instantiate_workflow_template_async_from_dict(): - await test_instantiate_workflow_template_async(request_type=dict) - - -def test_instantiate_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.InstantiateWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.instantiate_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_instantiate_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.InstantiateWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.instantiate_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_instantiate_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.instantiate_workflow_template( - name='name_value', - parameters={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - assert args[0].parameters == {'key_value': 'value_value'} - - -def test_instantiate_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.instantiate_workflow_template( - workflow_templates.InstantiateWorkflowTemplateRequest(), - name='name_value', - parameters={'key_value': 'value_value'}, - ) - - -@pytest.mark.asyncio -async def test_instantiate_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.instantiate_workflow_template( - name='name_value', - parameters={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - assert args[0].parameters == {'key_value': 'value_value'} - - -@pytest.mark.asyncio -async def test_instantiate_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.instantiate_workflow_template( - workflow_templates.InstantiateWorkflowTemplateRequest(), - name='name_value', - parameters={'key_value': 'value_value'}, - ) - - -def test_instantiate_inline_workflow_template(transport: str = 'grpc', request_type=workflow_templates.InstantiateInlineWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.instantiate_inline_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_instantiate_inline_workflow_template_from_dict(): - test_instantiate_inline_workflow_template(request_type=dict) - - -def test_instantiate_inline_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - client.instantiate_inline_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_instantiate_inline_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.InstantiateInlineWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.instantiate_inline_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_instantiate_inline_workflow_template_async_from_dict(): - await test_instantiate_inline_workflow_template_async(request_type=dict) - - -def test_instantiate_inline_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.instantiate_inline_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_instantiate_inline_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.instantiate_inline_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_instantiate_inline_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.instantiate_inline_workflow_template( - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -def test_instantiate_inline_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.instantiate_inline_workflow_template( - workflow_templates.InstantiateInlineWorkflowTemplateRequest(), - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_instantiate_inline_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.instantiate_inline_workflow_template( - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -@pytest.mark.asyncio -async def test_instantiate_inline_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.instantiate_inline_workflow_template( - workflow_templates.InstantiateInlineWorkflowTemplateRequest(), - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -def test_update_workflow_template(transport: str = 'grpc', request_type=workflow_templates.UpdateWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - ) - response = client.update_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -def test_update_workflow_template_from_dict(): - test_update_workflow_template(request_type=dict) - - -def test_update_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - client.update_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_update_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.UpdateWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - )) - response = await client.update_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -@pytest.mark.asyncio -async def test_update_workflow_template_async_from_dict(): - await test_update_workflow_template_async(request_type=dict) - - -def test_update_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.UpdateWorkflowTemplateRequest() - - request.template.name = 'template.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - call.return_value = workflow_templates.WorkflowTemplate() - client.update_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'template.name=template.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.UpdateWorkflowTemplateRequest() - - request.template.name = 'template.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - await client.update_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'template.name=template.name/value', - ) in kw['metadata'] - - -def test_update_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_workflow_template( - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -def test_update_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_workflow_template( - workflow_templates.UpdateWorkflowTemplateRequest(), - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_update_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_workflow_template( - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -@pytest.mark.asyncio -async def test_update_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_workflow_template( - workflow_templates.UpdateWorkflowTemplateRequest(), - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -def test_list_workflow_templates(transport: str = 'grpc', request_type=workflow_templates.ListWorkflowTemplatesRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.ListWorkflowTemplatesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_workflow_templates(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListWorkflowTemplatesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_workflow_templates_from_dict(): - test_list_workflow_templates(request_type=dict) - - -def test_list_workflow_templates_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - client.list_workflow_templates() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() - - -@pytest.mark.asyncio -async def test_list_workflow_templates_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.ListWorkflowTemplatesRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_workflow_templates(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListWorkflowTemplatesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_workflow_templates_async_from_dict(): - await test_list_workflow_templates_async(request_type=dict) - - -def test_list_workflow_templates_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.ListWorkflowTemplatesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - call.return_value = workflow_templates.ListWorkflowTemplatesResponse() - client.list_workflow_templates(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_workflow_templates_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.ListWorkflowTemplatesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse()) - await client.list_workflow_templates(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_workflow_templates_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.ListWorkflowTemplatesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_workflow_templates( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_workflow_templates_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_workflow_templates( - workflow_templates.ListWorkflowTemplatesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_workflow_templates_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.ListWorkflowTemplatesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_workflow_templates( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_workflow_templates_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_workflow_templates( - workflow_templates.ListWorkflowTemplatesRequest(), - parent='parent_value', - ) - - -def test_list_workflow_templates_pager(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - next_page_token='abc', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[], - next_page_token='def', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - ], - next_page_token='ghi', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_workflow_templates(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, workflow_templates.WorkflowTemplate) - for i in results) - -def test_list_workflow_templates_pages(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - next_page_token='abc', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[], - next_page_token='def', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - ], - next_page_token='ghi', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - ), - RuntimeError, - ) - pages = list(client.list_workflow_templates(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_workflow_templates_async_pager(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - next_page_token='abc', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[], - next_page_token='def', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - ], - next_page_token='ghi', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_workflow_templates(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, workflow_templates.WorkflowTemplate) - for i in responses) - -@pytest.mark.asyncio -async def test_list_workflow_templates_async_pages(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - next_page_token='abc', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[], - next_page_token='def', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - ], - next_page_token='ghi', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_workflow_templates(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_workflow_template(transport: str = 'grpc', request_type=workflow_templates.DeleteWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_workflow_template_from_dict(): - test_delete_workflow_template(request_type=dict) - - -def test_delete_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - client.delete_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_delete_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.DeleteWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_workflow_template_async_from_dict(): - await test_delete_workflow_template_async(request_type=dict) - - -def test_delete_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.DeleteWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - call.return_value = None - client.delete_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.DeleteWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_workflow_template( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_workflow_template( - workflow_templates.DeleteWorkflowTemplateRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_workflow_template( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_workflow_template( - workflow_templates.DeleteWorkflowTemplateRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.WorkflowTemplateServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. 
- transport = transports.WorkflowTemplateServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = WorkflowTemplateServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.WorkflowTemplateServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = WorkflowTemplateServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.WorkflowTemplateServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = WorkflowTemplateServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.WorkflowTemplateServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.WorkflowTemplateServiceGrpcTransport, - transports.WorkflowTemplateServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.WorkflowTemplateServiceGrpcTransport, - ) - -def test_workflow_template_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.WorkflowTemplateServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_workflow_template_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.WorkflowTemplateServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_workflow_template', - 'get_workflow_template', - 'instantiate_workflow_template', - 'instantiate_inline_workflow_template', - 'update_workflow_template', - 'list_workflow_templates', - 'delete_workflow_template', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_workflow_template_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.WorkflowTemplateServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_workflow_template_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.WorkflowTemplateServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_workflow_template_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.WorkflowTemplateServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_workflow_template_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - WorkflowTemplateServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_workflow_template_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - WorkflowTemplateServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.WorkflowTemplateServiceGrpcTransport, - transports.WorkflowTemplateServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_workflow_template_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.WorkflowTemplateServiceGrpcTransport, - transports.WorkflowTemplateServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_workflow_template_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.WorkflowTemplateServiceGrpcTransport, grpc_helpers), - (transports.WorkflowTemplateServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_workflow_template_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "dataproc.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="dataproc.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) -def test_workflow_template_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_workflow_template_service_host_no_port(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), - ) - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_workflow_template_service_host_with_port(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), - ) - assert client.transport._host == 'dataproc.googleapis.com:8000' - -def test_workflow_template_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.WorkflowTemplateServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_workflow_template_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) -def test_workflow_template_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) -def test_workflow_template_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_workflow_template_service_grpc_lro_client(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -def test_workflow_template_service_grpc_lro_async_client(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_cluster_path(): - project = "squid" - location = "clam" - cluster = "whelk" - expected = "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) - actual = WorkflowTemplateServiceClient.cluster_path(project, location, cluster) - assert expected == actual - - -def test_parse_cluster_path(): - expected = { - "project": "octopus", - "location": "oyster", - "cluster": "nudibranch", - } - path = WorkflowTemplateServiceClient.cluster_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_cluster_path(path) - assert expected == actual - -def test_service_path(): - project = "cuttlefish" - location = "mussel" - service = "winkle" - expected = "projects/{project}/locations/{location}/services/{service}".format(project=project, location=location, service=service, ) - actual = WorkflowTemplateServiceClient.service_path(project, location, service) - assert expected == actual - - -def test_parse_service_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "service": "abalone", - } - path = WorkflowTemplateServiceClient.service_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_service_path(path) - assert expected == actual - -def test_workflow_template_path(): - project = "squid" - region = "clam" - workflow_template = "whelk" - expected = "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format(project=project, region=region, workflow_template=workflow_template, ) - actual = WorkflowTemplateServiceClient.workflow_template_path(project, region, workflow_template) - assert expected == actual - - -def test_parse_workflow_template_path(): - expected = { - "project": "octopus", - "region": "oyster", - "workflow_template": "nudibranch", - } - path = WorkflowTemplateServiceClient.workflow_template_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_workflow_template_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = WorkflowTemplateServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = WorkflowTemplateServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. 
- actual = WorkflowTemplateServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = WorkflowTemplateServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = WorkflowTemplateServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = WorkflowTemplateServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = WorkflowTemplateServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = WorkflowTemplateServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = WorkflowTemplateServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = WorkflowTemplateServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = WorkflowTemplateServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.WorkflowTemplateServiceTransport, '_prep_wrapped_messages') as prep: - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.WorkflowTemplateServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = WorkflowTemplateServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta2/.coveragerc b/owl-bot-staging/v1beta2/.coveragerc deleted file mode 100644 index 240638d1..00000000 --- a/owl-bot-staging/v1beta2/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/dataproc/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. 
- # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. - except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1beta2/MANIFEST.in b/owl-bot-staging/v1beta2/MANIFEST.in deleted file mode 100644 index 450e5822..00000000 --- a/owl-bot-staging/v1beta2/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/dataproc *.py -recursive-include google/cloud/dataproc_v1beta2 *.py diff --git a/owl-bot-staging/v1beta2/README.rst b/owl-bot-staging/v1beta2/README.rst deleted file mode 100644 index b751dfd9..00000000 --- a/owl-bot-staging/v1beta2/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Dataproc API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Dataproc API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv - source /bin/activate - /bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv - \Scripts\activate - \Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1beta2/docs/conf.py b/owl-bot-staging/v1beta2/docs/conf.py deleted file mode 100644 index 02417582..00000000 --- a/owl-bot-staging/v1beta2/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-dataproc documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. 
If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-dataproc" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. 
-todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. 
-# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-dataproc-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-dataproc.tex", - u"google-cloud-dataproc Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-dataproc", - u"Google Cloud Dataproc Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-dataproc", - u"google-cloud-dataproc Documentation", - author, - "google-cloud-dataproc", - "GAPIC library for Google Cloud Dataproc API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. 
-# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/autoscaling_policy_service.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/autoscaling_policy_service.rst deleted file mode 100644 index cc81bb57..00000000 --- a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/autoscaling_policy_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -AutoscalingPolicyService ------------------------------------------- - -.. automodule:: google.cloud.dataproc_v1beta2.services.autoscaling_policy_service - :members: - :inherited-members: - -.. automodule:: google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/cluster_controller.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/cluster_controller.rst deleted file mode 100644 index 3e375a37..00000000 --- a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/cluster_controller.rst +++ /dev/null @@ -1,10 +0,0 @@ -ClusterController ------------------------------------ - -.. automodule:: google.cloud.dataproc_v1beta2.services.cluster_controller - :members: - :inherited-members: - -.. automodule:: google.cloud.dataproc_v1beta2.services.cluster_controller.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/job_controller.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/job_controller.rst deleted file mode 100644 index 8ca76058..00000000 --- a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/job_controller.rst +++ /dev/null @@ -1,10 +0,0 @@ -JobController -------------------------------- - -.. automodule:: google.cloud.dataproc_v1beta2.services.job_controller - :members: - :inherited-members: - -.. automodule:: google.cloud.dataproc_v1beta2.services.job_controller.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/services.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/services.rst deleted file mode 100644 index 23c2d640..00000000 --- a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/services.rst +++ /dev/null @@ -1,9 +0,0 @@ -Services for Google Cloud Dataproc v1beta2 API -============================================== -.. 
toctree:: - :maxdepth: 2 - - autoscaling_policy_service - cluster_controller - job_controller - workflow_template_service diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/types.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/types.rst deleted file mode 100644 index 1358e4c1..00000000 --- a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Dataproc v1beta2 API -=========================================== - -.. automodule:: google.cloud.dataproc_v1beta2.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/workflow_template_service.rst b/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/workflow_template_service.rst deleted file mode 100644 index d93e941b..00000000 --- a/owl-bot-staging/v1beta2/docs/dataproc_v1beta2/workflow_template_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -WorkflowTemplateService ------------------------------------------ - -.. automodule:: google.cloud.dataproc_v1beta2.services.workflow_template_service - :members: - :inherited-members: - -.. automodule:: google.cloud.dataproc_v1beta2.services.workflow_template_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta2/docs/index.rst b/owl-bot-staging/v1beta2/docs/index.rst deleted file mode 100644 index 53ec37c5..00000000 --- a/owl-bot-staging/v1beta2/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - dataproc_v1beta2/services - dataproc_v1beta2/types diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc/__init__.py deleted file mode 100644 index 0e182517..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc/__init__.py +++ /dev/null @@ -1,205 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.client import AutoscalingPolicyServiceClient -from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.async_client import AutoscalingPolicyServiceAsyncClient -from google.cloud.dataproc_v1beta2.services.cluster_controller.client import ClusterControllerClient -from google.cloud.dataproc_v1beta2.services.cluster_controller.async_client import ClusterControllerAsyncClient -from google.cloud.dataproc_v1beta2.services.job_controller.client import JobControllerClient -from google.cloud.dataproc_v1beta2.services.job_controller.async_client import JobControllerAsyncClient -from google.cloud.dataproc_v1beta2.services.workflow_template_service.client import WorkflowTemplateServiceClient -from google.cloud.dataproc_v1beta2.services.workflow_template_service.async_client import WorkflowTemplateServiceAsyncClient - -from google.cloud.dataproc_v1beta2.types.autoscaling_policies import AutoscalingPolicy -from google.cloud.dataproc_v1beta2.types.autoscaling_policies import BasicAutoscalingAlgorithm -from google.cloud.dataproc_v1beta2.types.autoscaling_policies import BasicYarnAutoscalingConfig -from google.cloud.dataproc_v1beta2.types.autoscaling_policies import CreateAutoscalingPolicyRequest -from google.cloud.dataproc_v1beta2.types.autoscaling_policies import DeleteAutoscalingPolicyRequest -from google.cloud.dataproc_v1beta2.types.autoscaling_policies import GetAutoscalingPolicyRequest -from google.cloud.dataproc_v1beta2.types.autoscaling_policies import InstanceGroupAutoscalingPolicyConfig -from google.cloud.dataproc_v1beta2.types.autoscaling_policies import ListAutoscalingPoliciesRequest -from google.cloud.dataproc_v1beta2.types.autoscaling_policies import ListAutoscalingPoliciesResponse -from google.cloud.dataproc_v1beta2.types.autoscaling_policies import UpdateAutoscalingPolicyRequest -from google.cloud.dataproc_v1beta2.types.clusters import AcceleratorConfig -from google.cloud.dataproc_v1beta2.types.clusters import AutoscalingConfig -from google.cloud.dataproc_v1beta2.types.clusters import Cluster -from google.cloud.dataproc_v1beta2.types.clusters import ClusterConfig -from google.cloud.dataproc_v1beta2.types.clusters import ClusterMetrics -from google.cloud.dataproc_v1beta2.types.clusters import ClusterStatus -from google.cloud.dataproc_v1beta2.types.clusters import CreateClusterRequest -from google.cloud.dataproc_v1beta2.types.clusters import DeleteClusterRequest -from google.cloud.dataproc_v1beta2.types.clusters import DiagnoseClusterRequest -from google.cloud.dataproc_v1beta2.types.clusters import DiagnoseClusterResults -from google.cloud.dataproc_v1beta2.types.clusters import DiskConfig -from google.cloud.dataproc_v1beta2.types.clusters import EncryptionConfig -from google.cloud.dataproc_v1beta2.types.clusters import EndpointConfig -from google.cloud.dataproc_v1beta2.types.clusters import GceClusterConfig -from google.cloud.dataproc_v1beta2.types.clusters import GetClusterRequest -from google.cloud.dataproc_v1beta2.types.clusters import GkeClusterConfig -from google.cloud.dataproc_v1beta2.types.clusters import InstanceGroupConfig -from google.cloud.dataproc_v1beta2.types.clusters import KerberosConfig -from google.cloud.dataproc_v1beta2.types.clusters import LifecycleConfig -from google.cloud.dataproc_v1beta2.types.clusters import ListClustersRequest -from google.cloud.dataproc_v1beta2.types.clusters import ListClustersResponse -from google.cloud.dataproc_v1beta2.types.clusters import 
ManagedGroupConfig -from google.cloud.dataproc_v1beta2.types.clusters import NodeInitializationAction -from google.cloud.dataproc_v1beta2.types.clusters import ReservationAffinity -from google.cloud.dataproc_v1beta2.types.clusters import SecurityConfig -from google.cloud.dataproc_v1beta2.types.clusters import SoftwareConfig -from google.cloud.dataproc_v1beta2.types.clusters import UpdateClusterRequest -from google.cloud.dataproc_v1beta2.types.jobs import CancelJobRequest -from google.cloud.dataproc_v1beta2.types.jobs import DeleteJobRequest -from google.cloud.dataproc_v1beta2.types.jobs import GetJobRequest -from google.cloud.dataproc_v1beta2.types.jobs import HadoopJob -from google.cloud.dataproc_v1beta2.types.jobs import HiveJob -from google.cloud.dataproc_v1beta2.types.jobs import Job -from google.cloud.dataproc_v1beta2.types.jobs import JobMetadata -from google.cloud.dataproc_v1beta2.types.jobs import JobPlacement -from google.cloud.dataproc_v1beta2.types.jobs import JobReference -from google.cloud.dataproc_v1beta2.types.jobs import JobScheduling -from google.cloud.dataproc_v1beta2.types.jobs import JobStatus -from google.cloud.dataproc_v1beta2.types.jobs import ListJobsRequest -from google.cloud.dataproc_v1beta2.types.jobs import ListJobsResponse -from google.cloud.dataproc_v1beta2.types.jobs import LoggingConfig -from google.cloud.dataproc_v1beta2.types.jobs import PigJob -from google.cloud.dataproc_v1beta2.types.jobs import PrestoJob -from google.cloud.dataproc_v1beta2.types.jobs import PySparkJob -from google.cloud.dataproc_v1beta2.types.jobs import QueryList -from google.cloud.dataproc_v1beta2.types.jobs import SparkJob -from google.cloud.dataproc_v1beta2.types.jobs import SparkRJob -from google.cloud.dataproc_v1beta2.types.jobs import SparkSqlJob -from google.cloud.dataproc_v1beta2.types.jobs import SubmitJobRequest -from google.cloud.dataproc_v1beta2.types.jobs import UpdateJobRequest -from google.cloud.dataproc_v1beta2.types.jobs import YarnApplication -from google.cloud.dataproc_v1beta2.types.operations import ClusterOperationMetadata -from google.cloud.dataproc_v1beta2.types.operations import ClusterOperationStatus -from google.cloud.dataproc_v1beta2.types.shared import Component -from google.cloud.dataproc_v1beta2.types.workflow_templates import ClusterOperation -from google.cloud.dataproc_v1beta2.types.workflow_templates import ClusterSelector -from google.cloud.dataproc_v1beta2.types.workflow_templates import CreateWorkflowTemplateRequest -from google.cloud.dataproc_v1beta2.types.workflow_templates import DeleteWorkflowTemplateRequest -from google.cloud.dataproc_v1beta2.types.workflow_templates import GetWorkflowTemplateRequest -from google.cloud.dataproc_v1beta2.types.workflow_templates import InstantiateInlineWorkflowTemplateRequest -from google.cloud.dataproc_v1beta2.types.workflow_templates import InstantiateWorkflowTemplateRequest -from google.cloud.dataproc_v1beta2.types.workflow_templates import ListWorkflowTemplatesRequest -from google.cloud.dataproc_v1beta2.types.workflow_templates import ListWorkflowTemplatesResponse -from google.cloud.dataproc_v1beta2.types.workflow_templates import ManagedCluster -from google.cloud.dataproc_v1beta2.types.workflow_templates import OrderedJob -from google.cloud.dataproc_v1beta2.types.workflow_templates import ParameterValidation -from google.cloud.dataproc_v1beta2.types.workflow_templates import RegexValidation -from google.cloud.dataproc_v1beta2.types.workflow_templates import TemplateParameter -from 
google.cloud.dataproc_v1beta2.types.workflow_templates import UpdateWorkflowTemplateRequest -from google.cloud.dataproc_v1beta2.types.workflow_templates import ValueValidation -from google.cloud.dataproc_v1beta2.types.workflow_templates import WorkflowGraph -from google.cloud.dataproc_v1beta2.types.workflow_templates import WorkflowMetadata -from google.cloud.dataproc_v1beta2.types.workflow_templates import WorkflowNode -from google.cloud.dataproc_v1beta2.types.workflow_templates import WorkflowTemplate -from google.cloud.dataproc_v1beta2.types.workflow_templates import WorkflowTemplatePlacement - -__all__ = ('AutoscalingPolicyServiceClient', - 'AutoscalingPolicyServiceAsyncClient', - 'ClusterControllerClient', - 'ClusterControllerAsyncClient', - 'JobControllerClient', - 'JobControllerAsyncClient', - 'WorkflowTemplateServiceClient', - 'WorkflowTemplateServiceAsyncClient', - 'AutoscalingPolicy', - 'BasicAutoscalingAlgorithm', - 'BasicYarnAutoscalingConfig', - 'CreateAutoscalingPolicyRequest', - 'DeleteAutoscalingPolicyRequest', - 'GetAutoscalingPolicyRequest', - 'InstanceGroupAutoscalingPolicyConfig', - 'ListAutoscalingPoliciesRequest', - 'ListAutoscalingPoliciesResponse', - 'UpdateAutoscalingPolicyRequest', - 'AcceleratorConfig', - 'AutoscalingConfig', - 'Cluster', - 'ClusterConfig', - 'ClusterMetrics', - 'ClusterStatus', - 'CreateClusterRequest', - 'DeleteClusterRequest', - 'DiagnoseClusterRequest', - 'DiagnoseClusterResults', - 'DiskConfig', - 'EncryptionConfig', - 'EndpointConfig', - 'GceClusterConfig', - 'GetClusterRequest', - 'GkeClusterConfig', - 'InstanceGroupConfig', - 'KerberosConfig', - 'LifecycleConfig', - 'ListClustersRequest', - 'ListClustersResponse', - 'ManagedGroupConfig', - 'NodeInitializationAction', - 'ReservationAffinity', - 'SecurityConfig', - 'SoftwareConfig', - 'UpdateClusterRequest', - 'CancelJobRequest', - 'DeleteJobRequest', - 'GetJobRequest', - 'HadoopJob', - 'HiveJob', - 'Job', - 'JobMetadata', - 'JobPlacement', - 'JobReference', - 'JobScheduling', - 'JobStatus', - 'ListJobsRequest', - 'ListJobsResponse', - 'LoggingConfig', - 'PigJob', - 'PrestoJob', - 'PySparkJob', - 'QueryList', - 'SparkJob', - 'SparkRJob', - 'SparkSqlJob', - 'SubmitJobRequest', - 'UpdateJobRequest', - 'YarnApplication', - 'ClusterOperationMetadata', - 'ClusterOperationStatus', - 'Component', - 'ClusterOperation', - 'ClusterSelector', - 'CreateWorkflowTemplateRequest', - 'DeleteWorkflowTemplateRequest', - 'GetWorkflowTemplateRequest', - 'InstantiateInlineWorkflowTemplateRequest', - 'InstantiateWorkflowTemplateRequest', - 'ListWorkflowTemplatesRequest', - 'ListWorkflowTemplatesResponse', - 'ManagedCluster', - 'OrderedJob', - 'ParameterValidation', - 'RegexValidation', - 'TemplateParameter', - 'UpdateWorkflowTemplateRequest', - 'ValueValidation', - 'WorkflowGraph', - 'WorkflowMetadata', - 'WorkflowNode', - 'WorkflowTemplate', - 'WorkflowTemplatePlacement', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc/py.typed b/owl-bot-staging/v1beta2/google/cloud/dataproc/py.typed deleted file mode 100644 index aac99cba..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-dataproc package uses inline types. 
diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/__init__.py deleted file mode 100644 index a143d99f..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/__init__.py +++ /dev/null @@ -1,206 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from .services.autoscaling_policy_service import AutoscalingPolicyServiceClient -from .services.autoscaling_policy_service import AutoscalingPolicyServiceAsyncClient -from .services.cluster_controller import ClusterControllerClient -from .services.cluster_controller import ClusterControllerAsyncClient -from .services.job_controller import JobControllerClient -from .services.job_controller import JobControllerAsyncClient -from .services.workflow_template_service import WorkflowTemplateServiceClient -from .services.workflow_template_service import WorkflowTemplateServiceAsyncClient - -from .types.autoscaling_policies import AutoscalingPolicy -from .types.autoscaling_policies import BasicAutoscalingAlgorithm -from .types.autoscaling_policies import BasicYarnAutoscalingConfig -from .types.autoscaling_policies import CreateAutoscalingPolicyRequest -from .types.autoscaling_policies import DeleteAutoscalingPolicyRequest -from .types.autoscaling_policies import GetAutoscalingPolicyRequest -from .types.autoscaling_policies import InstanceGroupAutoscalingPolicyConfig -from .types.autoscaling_policies import ListAutoscalingPoliciesRequest -from .types.autoscaling_policies import ListAutoscalingPoliciesResponse -from .types.autoscaling_policies import UpdateAutoscalingPolicyRequest -from .types.clusters import AcceleratorConfig -from .types.clusters import AutoscalingConfig -from .types.clusters import Cluster -from .types.clusters import ClusterConfig -from .types.clusters import ClusterMetrics -from .types.clusters import ClusterStatus -from .types.clusters import CreateClusterRequest -from .types.clusters import DeleteClusterRequest -from .types.clusters import DiagnoseClusterRequest -from .types.clusters import DiagnoseClusterResults -from .types.clusters import DiskConfig -from .types.clusters import EncryptionConfig -from .types.clusters import EndpointConfig -from .types.clusters import GceClusterConfig -from .types.clusters import GetClusterRequest -from .types.clusters import GkeClusterConfig -from .types.clusters import InstanceGroupConfig -from .types.clusters import KerberosConfig -from .types.clusters import LifecycleConfig -from .types.clusters import ListClustersRequest -from .types.clusters import ListClustersResponse -from .types.clusters import ManagedGroupConfig -from .types.clusters import NodeInitializationAction -from .types.clusters import ReservationAffinity -from .types.clusters import SecurityConfig -from .types.clusters import SoftwareConfig -from .types.clusters import UpdateClusterRequest -from .types.jobs import CancelJobRequest -from .types.jobs import DeleteJobRequest 
-from .types.jobs import GetJobRequest -from .types.jobs import HadoopJob -from .types.jobs import HiveJob -from .types.jobs import Job -from .types.jobs import JobMetadata -from .types.jobs import JobPlacement -from .types.jobs import JobReference -from .types.jobs import JobScheduling -from .types.jobs import JobStatus -from .types.jobs import ListJobsRequest -from .types.jobs import ListJobsResponse -from .types.jobs import LoggingConfig -from .types.jobs import PigJob -from .types.jobs import PrestoJob -from .types.jobs import PySparkJob -from .types.jobs import QueryList -from .types.jobs import SparkJob -from .types.jobs import SparkRJob -from .types.jobs import SparkSqlJob -from .types.jobs import SubmitJobRequest -from .types.jobs import UpdateJobRequest -from .types.jobs import YarnApplication -from .types.operations import ClusterOperationMetadata -from .types.operations import ClusterOperationStatus -from .types.shared import Component -from .types.workflow_templates import ClusterOperation -from .types.workflow_templates import ClusterSelector -from .types.workflow_templates import CreateWorkflowTemplateRequest -from .types.workflow_templates import DeleteWorkflowTemplateRequest -from .types.workflow_templates import GetWorkflowTemplateRequest -from .types.workflow_templates import InstantiateInlineWorkflowTemplateRequest -from .types.workflow_templates import InstantiateWorkflowTemplateRequest -from .types.workflow_templates import ListWorkflowTemplatesRequest -from .types.workflow_templates import ListWorkflowTemplatesResponse -from .types.workflow_templates import ManagedCluster -from .types.workflow_templates import OrderedJob -from .types.workflow_templates import ParameterValidation -from .types.workflow_templates import RegexValidation -from .types.workflow_templates import TemplateParameter -from .types.workflow_templates import UpdateWorkflowTemplateRequest -from .types.workflow_templates import ValueValidation -from .types.workflow_templates import WorkflowGraph -from .types.workflow_templates import WorkflowMetadata -from .types.workflow_templates import WorkflowNode -from .types.workflow_templates import WorkflowTemplate -from .types.workflow_templates import WorkflowTemplatePlacement - -__all__ = ( - 'AutoscalingPolicyServiceAsyncClient', - 'ClusterControllerAsyncClient', - 'JobControllerAsyncClient', - 'WorkflowTemplateServiceAsyncClient', -'AcceleratorConfig', -'AutoscalingConfig', -'AutoscalingPolicy', -'AutoscalingPolicyServiceClient', -'BasicAutoscalingAlgorithm', -'BasicYarnAutoscalingConfig', -'CancelJobRequest', -'Cluster', -'ClusterConfig', -'ClusterControllerClient', -'ClusterMetrics', -'ClusterOperation', -'ClusterOperationMetadata', -'ClusterOperationStatus', -'ClusterSelector', -'ClusterStatus', -'Component', -'CreateAutoscalingPolicyRequest', -'CreateClusterRequest', -'CreateWorkflowTemplateRequest', -'DeleteAutoscalingPolicyRequest', -'DeleteClusterRequest', -'DeleteJobRequest', -'DeleteWorkflowTemplateRequest', -'DiagnoseClusterRequest', -'DiagnoseClusterResults', -'DiskConfig', -'EncryptionConfig', -'EndpointConfig', -'GceClusterConfig', -'GetAutoscalingPolicyRequest', -'GetClusterRequest', -'GetJobRequest', -'GetWorkflowTemplateRequest', -'GkeClusterConfig', -'HadoopJob', -'HiveJob', -'InstanceGroupAutoscalingPolicyConfig', -'InstanceGroupConfig', -'InstantiateInlineWorkflowTemplateRequest', -'InstantiateWorkflowTemplateRequest', -'Job', -'JobControllerClient', -'JobMetadata', -'JobPlacement', -'JobReference', -'JobScheduling', -'JobStatus', 
-'KerberosConfig', -'LifecycleConfig', -'ListAutoscalingPoliciesRequest', -'ListAutoscalingPoliciesResponse', -'ListClustersRequest', -'ListClustersResponse', -'ListJobsRequest', -'ListJobsResponse', -'ListWorkflowTemplatesRequest', -'ListWorkflowTemplatesResponse', -'LoggingConfig', -'ManagedCluster', -'ManagedGroupConfig', -'NodeInitializationAction', -'OrderedJob', -'ParameterValidation', -'PigJob', -'PrestoJob', -'PySparkJob', -'QueryList', -'RegexValidation', -'ReservationAffinity', -'SecurityConfig', -'SoftwareConfig', -'SparkJob', -'SparkRJob', -'SparkSqlJob', -'SubmitJobRequest', -'TemplateParameter', -'UpdateAutoscalingPolicyRequest', -'UpdateClusterRequest', -'UpdateJobRequest', -'UpdateWorkflowTemplateRequest', -'ValueValidation', -'WorkflowGraph', -'WorkflowMetadata', -'WorkflowNode', -'WorkflowTemplate', -'WorkflowTemplatePlacement', -'WorkflowTemplateServiceClient', -'YarnApplication', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/gapic_metadata.json b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/gapic_metadata.json deleted file mode 100644 index c20241a8..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/gapic_metadata.json +++ /dev/null @@ -1,315 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.dataproc_v1beta2", - "protoPackage": "google.cloud.dataproc.v1beta2", - "schema": "1.0", - "services": { - "AutoscalingPolicyService": { - "clients": { - "grpc": { - "libraryClient": "AutoscalingPolicyServiceClient", - "rpcs": { - "CreateAutoscalingPolicy": { - "methods": [ - "create_autoscaling_policy" - ] - }, - "DeleteAutoscalingPolicy": { - "methods": [ - "delete_autoscaling_policy" - ] - }, - "GetAutoscalingPolicy": { - "methods": [ - "get_autoscaling_policy" - ] - }, - "ListAutoscalingPolicies": { - "methods": [ - "list_autoscaling_policies" - ] - }, - "UpdateAutoscalingPolicy": { - "methods": [ - "update_autoscaling_policy" - ] - } - } - }, - "grpc-async": { - "libraryClient": "AutoscalingPolicyServiceAsyncClient", - "rpcs": { - "CreateAutoscalingPolicy": { - "methods": [ - "create_autoscaling_policy" - ] - }, - "DeleteAutoscalingPolicy": { - "methods": [ - "delete_autoscaling_policy" - ] - }, - "GetAutoscalingPolicy": { - "methods": [ - "get_autoscaling_policy" - ] - }, - "ListAutoscalingPolicies": { - "methods": [ - "list_autoscaling_policies" - ] - }, - "UpdateAutoscalingPolicy": { - "methods": [ - "update_autoscaling_policy" - ] - } - } - } - } - }, - "ClusterController": { - "clients": { - "grpc": { - "libraryClient": "ClusterControllerClient", - "rpcs": { - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DiagnoseCluster": { - "methods": [ - "diagnose_cluster" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - } - } - }, - "grpc-async": { - "libraryClient": "ClusterControllerAsyncClient", - "rpcs": { - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DiagnoseCluster": { - "methods": [ - "diagnose_cluster" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - } - } - } - } - }, - 
"JobController": { - "clients": { - "grpc": { - "libraryClient": "JobControllerClient", - "rpcs": { - "CancelJob": { - "methods": [ - "cancel_job" - ] - }, - "DeleteJob": { - "methods": [ - "delete_job" - ] - }, - "GetJob": { - "methods": [ - "get_job" - ] - }, - "ListJobs": { - "methods": [ - "list_jobs" - ] - }, - "SubmitJob": { - "methods": [ - "submit_job" - ] - }, - "SubmitJobAsOperation": { - "methods": [ - "submit_job_as_operation" - ] - }, - "UpdateJob": { - "methods": [ - "update_job" - ] - } - } - }, - "grpc-async": { - "libraryClient": "JobControllerAsyncClient", - "rpcs": { - "CancelJob": { - "methods": [ - "cancel_job" - ] - }, - "DeleteJob": { - "methods": [ - "delete_job" - ] - }, - "GetJob": { - "methods": [ - "get_job" - ] - }, - "ListJobs": { - "methods": [ - "list_jobs" - ] - }, - "SubmitJob": { - "methods": [ - "submit_job" - ] - }, - "SubmitJobAsOperation": { - "methods": [ - "submit_job_as_operation" - ] - }, - "UpdateJob": { - "methods": [ - "update_job" - ] - } - } - } - } - }, - "WorkflowTemplateService": { - "clients": { - "grpc": { - "libraryClient": "WorkflowTemplateServiceClient", - "rpcs": { - "CreateWorkflowTemplate": { - "methods": [ - "create_workflow_template" - ] - }, - "DeleteWorkflowTemplate": { - "methods": [ - "delete_workflow_template" - ] - }, - "GetWorkflowTemplate": { - "methods": [ - "get_workflow_template" - ] - }, - "InstantiateInlineWorkflowTemplate": { - "methods": [ - "instantiate_inline_workflow_template" - ] - }, - "InstantiateWorkflowTemplate": { - "methods": [ - "instantiate_workflow_template" - ] - }, - "ListWorkflowTemplates": { - "methods": [ - "list_workflow_templates" - ] - }, - "UpdateWorkflowTemplate": { - "methods": [ - "update_workflow_template" - ] - } - } - }, - "grpc-async": { - "libraryClient": "WorkflowTemplateServiceAsyncClient", - "rpcs": { - "CreateWorkflowTemplate": { - "methods": [ - "create_workflow_template" - ] - }, - "DeleteWorkflowTemplate": { - "methods": [ - "delete_workflow_template" - ] - }, - "GetWorkflowTemplate": { - "methods": [ - "get_workflow_template" - ] - }, - "InstantiateInlineWorkflowTemplate": { - "methods": [ - "instantiate_inline_workflow_template" - ] - }, - "InstantiateWorkflowTemplate": { - "methods": [ - "instantiate_workflow_template" - ] - }, - "ListWorkflowTemplates": { - "methods": [ - "list_workflow_templates" - ] - }, - "UpdateWorkflowTemplate": { - "methods": [ - "update_workflow_template" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/py.typed b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/py.typed deleted file mode 100644 index aac99cba..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-dataproc package uses inline types. diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/__init__.py deleted file mode 100644 index 4de65971..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py deleted file mode 100644 index 2401da6f..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import AutoscalingPolicyServiceClient -from .async_client import AutoscalingPolicyServiceAsyncClient - -__all__ = ( - 'AutoscalingPolicyServiceClient', - 'AutoscalingPolicyServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py deleted file mode 100644 index 140c14cc..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py +++ /dev/null @@ -1,623 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import pagers -from google.cloud.dataproc_v1beta2.types import autoscaling_policies -from .transports.base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport -from .client import AutoscalingPolicyServiceClient - - -class AutoscalingPolicyServiceAsyncClient: - """The API interface for managing autoscaling policies in the - Cloud Dataproc API. - """ - - _client: AutoscalingPolicyServiceClient - - DEFAULT_ENDPOINT = AutoscalingPolicyServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = AutoscalingPolicyServiceClient.DEFAULT_MTLS_ENDPOINT - - autoscaling_policy_path = staticmethod(AutoscalingPolicyServiceClient.autoscaling_policy_path) - parse_autoscaling_policy_path = staticmethod(AutoscalingPolicyServiceClient.parse_autoscaling_policy_path) - common_billing_account_path = staticmethod(AutoscalingPolicyServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(AutoscalingPolicyServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(AutoscalingPolicyServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_organization_path) - common_project_path = staticmethod(AutoscalingPolicyServiceClient.common_project_path) - parse_common_project_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_project_path) - common_location_path = staticmethod(AutoscalingPolicyServiceClient.common_location_path) - parse_common_location_path = staticmethod(AutoscalingPolicyServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoscalingPolicyServiceAsyncClient: The constructed client. - """ - return AutoscalingPolicyServiceClient.from_service_account_info.__func__(AutoscalingPolicyServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoscalingPolicyServiceAsyncClient: The constructed client. 
- """ - return AutoscalingPolicyServiceClient.from_service_account_file.__func__(AutoscalingPolicyServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> AutoscalingPolicyServiceTransport: - """Returns the transport used by the client instance. - - Returns: - AutoscalingPolicyServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(AutoscalingPolicyServiceClient).get_transport_class, type(AutoscalingPolicyServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, AutoscalingPolicyServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the autoscaling policy service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.AutoscalingPolicyServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = AutoscalingPolicyServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_autoscaling_policy(self, - request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, - *, - parent: str = None, - policy: autoscaling_policies.AutoscalingPolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Creates new autoscaling policy. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.CreateAutoscalingPolicyRequest`): - The request object. A request to create an autoscaling - policy. - parent (:class:`str`): - Required. The "resource name" of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. 
- - - For ``projects.regions.autoscalingPolicies.create``, - the resource name has the following format: - ``projects/{project_id}/regions/{region}`` - - - For - ``projects.locations.autoscalingPolicies.create``, - the resource name has the following format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - policy (:class:`google.cloud.dataproc_v1beta2.types.AutoscalingPolicy`): - Required. The autoscaling policy to - create. - - This corresponds to the ``policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, policy]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if policy is not None: - request.policy = policy - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_autoscaling_policy, - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_autoscaling_policy(self, - request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, - *, - policy: autoscaling_policies.AutoscalingPolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.UpdateAutoscalingPolicyRequest`): - The request object. A request to update an autoscaling - policy. - policy (:class:`google.cloud.dataproc_v1beta2.types.AutoscalingPolicy`): - Required. The updated autoscaling - policy. - - This corresponds to the ``policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([policy]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if policy is not None: - request.policy = policy - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_autoscaling_policy, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("policy.name", request.policy.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_autoscaling_policy(self, - request: autoscaling_policies.GetAutoscalingPolicyRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Retrieves autoscaling policy. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.GetAutoscalingPolicyRequest`): - The request object. A request to fetch an autoscaling - policy. - name (:class:`str`): - Required. The "resource name" of the autoscaling policy, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.get``, the - resource name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.get``, - the resource name of the policy has the following - format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
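Each call above also folds its routing field (``parent``, ``name``, or ``policy.name``) into the request metadata via ``gapic_v1.routing_header.to_grpc_metadata``. A small sketch of what that helper returns, to the best of my reading of ``google-api-core`` (the exact URL encoding can vary by version)::

    from google.api_core import gapic_v1

    # Produces a single ("x-goog-request-params", "<url-encoded pairs>") entry
    # that is sent alongside any caller-supplied metadata.
    routing = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", "projects/my-project/regions/us-central1"),)
    )
    print(routing)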
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = autoscaling_policies.GetAutoscalingPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_autoscaling_policy, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_autoscaling_policies(self, - request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListAutoscalingPoliciesAsyncPager: - r"""Lists autoscaling policies in the project. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest`): - The request object. A request to list autoscaling - policies in a project. - parent (:class:`str`): - Required. The "resource name" of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.list``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.list``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesAsyncPager: - A response to a request to list - autoscaling policies in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_autoscaling_policies, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListAutoscalingPoliciesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_autoscaling_policy(self, - request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes an autoscaling policy. It is an error to - delete an autoscaling policy that is in use by one or - more clusters. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.DeleteAutoscalingPolicyRequest`): - The request object. A request to delete an autoscaling - policy. - Autoscaling policies in use by one or more clusters will - not be deleted. - name (:class:`str`): - Required. The "resource name" of the autoscaling policy, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.delete``, - the resource name of the policy has the following - format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For - ``projects.locations.autoscalingPolicies.delete``, - the resource name of the policy has the following - format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
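The ``update``, ``get``, and ``list`` wrappers above share one default retry policy: exponential backoff starting at 0.1s, growing by a factor of 1.3 up to 60s between attempts, retrying only ``DeadlineExceeded`` and ``ServiceUnavailable``, with an overall 600s deadline. A sketch that reconstructs that policy and shows a per-call override (the override values are arbitrary examples, not defaults from this patch)::

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries

    # The same default policy as the wrapped methods above.
    default_retry = retries.Retry(
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.ServiceUnavailable,
        ),
        deadline=600.0,
    )

    # Nominal sleep schedule (the library adds jitter): 0.1, 0.13, 0.169, ...
    delay, schedule = 0.1, []
    for _ in range(10):
        schedule.append(round(min(delay, 60.0), 3))
        delay *= 1.3
    print(schedule)

    # Every method accepts a per-call override, e.g. a tighter overall deadline:
    # client.get_autoscaling_policy(
    #     name=policy_name,
    #     retry=default_retry.with_deadline(120.0),
    #     timeout=30.0,
    # )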
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_autoscaling_policy, - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "AutoscalingPolicyServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py deleted file mode 100644 index 17b376c4..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py +++ /dev/null @@ -1,789 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import pagers -from google.cloud.dataproc_v1beta2.types import autoscaling_policies -from .transports.base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import AutoscalingPolicyServiceGrpcTransport -from .transports.grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport - - -class AutoscalingPolicyServiceClientMeta(type): - """Metaclass for the AutoscalingPolicyService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] - _transport_registry["grpc"] = AutoscalingPolicyServiceGrpcTransport - _transport_registry["grpc_asyncio"] = AutoscalingPolicyServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[AutoscalingPolicyServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class AutoscalingPolicyServiceClient(metaclass=AutoscalingPolicyServiceClientMeta): - """The API interface for managing autoscaling policies in the - Cloud Dataproc API. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "dataproc.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoscalingPolicyServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoscalingPolicyServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> AutoscalingPolicyServiceTransport: - """Returns the transport used by the client instance. - - Returns: - AutoscalingPolicyServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def autoscaling_policy_path(project: str,location: str,autoscaling_policy: str,) -> str: - """Returns a fully-qualified autoscaling_policy string.""" - return "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format(project=project, location=location, autoscaling_policy=autoscaling_policy, ) - - @staticmethod - def parse_autoscaling_policy_path(path: str) -> Dict[str,str]: - """Parses a autoscaling_policy path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/autoscalingPolicies/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, AutoscalingPolicyServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the autoscaling policy service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, AutoscalingPolicyServiceTransport]): The - transport to use. 
If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, AutoscalingPolicyServiceTransport): - # transport is a AutoscalingPolicyServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_autoscaling_policy(self, - request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, - *, - parent: str = None, - policy: autoscaling_policies.AutoscalingPolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Creates new autoscaling policy. - - Args: - request (google.cloud.dataproc_v1beta2.types.CreateAutoscalingPolicyRequest): - The request object. A request to create an autoscaling - policy. - parent (str): - Required. The "resource name" of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.create``, - the resource name has the following format: - ``projects/{project_id}/regions/{region}`` - - - For - ``projects.locations.autoscalingPolicies.create``, - the resource name has the following format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): - Required. The autoscaling policy to - create. - - This corresponds to the ``policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, policy]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a autoscaling_policies.CreateAutoscalingPolicyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, autoscaling_policies.CreateAutoscalingPolicyRequest): - request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if policy is not None: - request.policy = policy - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_autoscaling_policy] - - # Certain fields should be provided within the metadata header; - # add these here. 
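The constructor logic above resolves the endpoint in a fixed order: an explicit ``api_endpoint`` wins, otherwise ``GOOGLE_API_USE_MTLS_ENDPOINT`` ("never", "always", or "auto") together with the presence of a client certificate decides between ``dataproc.googleapis.com`` and ``dataproc.mtls.googleapis.com``. A construction sketch, assuming application default credentials are available and using a regional endpoint as an example override::

    import os

    from google.api_core.client_options import ClientOptions
    from google.cloud.dataproc_v1beta2 import AutoscalingPolicyServiceClient

    # An explicit api_endpoint always takes precedence over the mTLS switches.
    options = ClientOptions(api_endpoint="us-central1-dataproc.googleapis.com")
    regional_client = AutoscalingPolicyServiceClient(client_options=options)

    # Without api_endpoint, the environment decides:
    #   "never"  -> dataproc.googleapis.com
    #   "always" -> dataproc.mtls.googleapis.com
    #   "auto"   -> mTLS endpoint only when a client certificate is configured
    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"
    default_client = AutoscalingPolicyServiceClient()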
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_autoscaling_policy(self, - request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, - *, - policy: autoscaling_policies.AutoscalingPolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. - - Args: - request (google.cloud.dataproc_v1beta2.types.UpdateAutoscalingPolicyRequest): - The request object. A request to update an autoscaling - policy. - policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): - Required. The updated autoscaling - policy. - - This corresponds to the ``policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([policy]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a autoscaling_policies.UpdateAutoscalingPolicyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, autoscaling_policies.UpdateAutoscalingPolicyRequest): - request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if policy is not None: - request.policy = policy - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_autoscaling_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("policy.name", request.policy.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_autoscaling_policy(self, - request: autoscaling_policies.GetAutoscalingPolicyRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> autoscaling_policies.AutoscalingPolicy: - r"""Retrieves autoscaling policy. - - Args: - request (google.cloud.dataproc_v1beta2.types.GetAutoscalingPolicyRequest): - The request object. A request to fetch an autoscaling - policy. - name (str): - Required. 
The "resource name" of the autoscaling policy, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.get``, the - resource name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.get``, - the resource name of the policy has the following - format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: - Describes an autoscaling policy for - Dataproc cluster autoscaler. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a autoscaling_policies.GetAutoscalingPolicyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, autoscaling_policies.GetAutoscalingPolicyRequest): - request = autoscaling_policies.GetAutoscalingPolicyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_autoscaling_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_autoscaling_policies(self, - request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListAutoscalingPoliciesPager: - r"""Lists autoscaling policies in the project. - - Args: - request (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest): - The request object. A request to list autoscaling - policies in a project. - parent (str): - Required. The "resource name" of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. 
- - - For ``projects.regions.autoscalingPolicies.list``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.list``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesPager: - A response to a request to list - autoscaling policies in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a autoscaling_policies.ListAutoscalingPoliciesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, autoscaling_policies.ListAutoscalingPoliciesRequest): - request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_autoscaling_policies] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListAutoscalingPoliciesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_autoscaling_policy(self, - request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes an autoscaling policy. It is an error to - delete an autoscaling policy that is in use by one or - more clusters. - - Args: - request (google.cloud.dataproc_v1beta2.types.DeleteAutoscalingPolicyRequest): - The request object. A request to delete an autoscaling - policy. - Autoscaling policies in use by one or more clusters will - not be deleted. - name (str): - Required. The "resource name" of the autoscaling policy, - as described in - https://cloud.google.com/apis/design/resource_names. 
- - - For ``projects.regions.autoscalingPolicies.delete``, - the resource name of the policy has the following - format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For - ``projects.locations.autoscalingPolicies.delete``, - the resource name of the policy has the following - format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a autoscaling_policies.DeleteAutoscalingPolicyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, autoscaling_policies.DeleteAutoscalingPolicyRequest): - request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_autoscaling_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "AutoscalingPolicyServiceClient", -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py deleted file mode 100644 index 58533089..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
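The ``pagers`` module whose removal follows backs the ``list_autoscaling_policies`` methods shown earlier; iterating the returned pager transparently fetches further pages. A usage sketch for the synchronous client (the project and region are placeholders, and credentials are assumed to be available)::

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.AutoscalingPolicyServiceClient()
    parent = "projects/my-project/regions/us-central1"

    # Item iteration: additional pages are requested on demand.
    for policy in client.list_autoscaling_policies(parent=parent):
        print(policy.id)

    # Page iteration via the ``pages`` property; page_size goes on the request
    # object because it is not a flattened argument.
    request = dataproc_v1beta2.ListAutoscalingPoliciesRequest(parent=parent, page_size=10)
    for page in client.list_autoscaling_policies(request=request).pages:
        print(len(page.policies))

    # The async client returns a ListAutoscalingPoliciesAsyncPager instead,
    # which is consumed with ``async for`` after awaiting the call.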
-# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.dataproc_v1beta2.types import autoscaling_policies - - -class ListAutoscalingPoliciesPager: - """A pager for iterating through ``list_autoscaling_policies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``policies`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListAutoscalingPolicies`` requests and continue to iterate - through the ``policies`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., autoscaling_policies.ListAutoscalingPoliciesResponse], - request: autoscaling_policies.ListAutoscalingPoliciesRequest, - response: autoscaling_policies.ListAutoscalingPoliciesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest): - The initial request object. - response (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[autoscaling_policies.AutoscalingPolicy]: - for page in self.pages: - yield from page.policies - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListAutoscalingPoliciesAsyncPager: - """A pager for iterating through ``list_autoscaling_policies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``policies`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListAutoscalingPolicies`` requests and continue to iterate - through the ``policies`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse]], - request: autoscaling_policies.ListAutoscalingPoliciesRequest, - response: autoscaling_policies.ListAutoscalingPoliciesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest): - The initial request object. - response (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[autoscaling_policies.AutoscalingPolicy]: - async def async_generator(): - async for page in self.pages: - for response in page.policies: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py deleted file mode 100644 index 55ea5b98..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import AutoscalingPolicyServiceTransport -from .grpc import AutoscalingPolicyServiceGrpcTransport -from .grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] -_transport_registry['grpc'] = AutoscalingPolicyServiceGrpcTransport -_transport_registry['grpc_asyncio'] = AutoscalingPolicyServiceGrpcAsyncIOTransport - -__all__ = ( - 'AutoscalingPolicyServiceTransport', - 'AutoscalingPolicyServiceGrpcTransport', - 'AutoscalingPolicyServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py deleted file mode 100644 index 8f916ab4..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py +++ /dev/null @@ -1,246 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1beta2.types import autoscaling_policies -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-dataproc', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class AutoscalingPolicyServiceTransport(abc.ABC): - """Abstract transport class for AutoscalingPolicyService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'dataproc.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_autoscaling_policy: gapic_v1.method.wrap_method( - self.create_autoscaling_policy, - default_timeout=600.0, - client_info=client_info, - ), - self.update_autoscaling_policy: gapic_v1.method.wrap_method( - self.update_autoscaling_policy, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.get_autoscaling_policy: gapic_v1.method.wrap_method( - self.get_autoscaling_policy, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.list_autoscaling_policies: gapic_v1.method.wrap_method( - self.list_autoscaling_policies, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.delete_autoscaling_policy: gapic_v1.method.wrap_method( - self.delete_autoscaling_policy, - default_timeout=600.0, - client_info=client_info, - ), - } - - @property - def create_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.CreateAutoscalingPolicyRequest], - Union[ - autoscaling_policies.AutoscalingPolicy, - Awaitable[autoscaling_policies.AutoscalingPolicy] - ]]: - raise NotImplementedError() - - @property - def update_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.UpdateAutoscalingPolicyRequest], - Union[ - autoscaling_policies.AutoscalingPolicy, - Awaitable[autoscaling_policies.AutoscalingPolicy] - ]]: - raise NotImplementedError() - - @property - def get_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.GetAutoscalingPolicyRequest], - Union[ - autoscaling_policies.AutoscalingPolicy, - Awaitable[autoscaling_policies.AutoscalingPolicy] - ]]: - raise NotImplementedError() - - @property - def list_autoscaling_policies(self) -> Callable[ - [autoscaling_policies.ListAutoscalingPoliciesRequest], - Union[ - autoscaling_policies.ListAutoscalingPoliciesResponse, - Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.DeleteAutoscalingPolicyRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'AutoscalingPolicyServiceTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py deleted file mode 100644 index 5e1754bd..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py +++ /dev/null @@ -1,363 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.dataproc_v1beta2.types import autoscaling_policies -from google.protobuf import empty_pb2 # type: ignore -from .base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO - - -class AutoscalingPolicyServiceGrpcTransport(AutoscalingPolicyServiceTransport): - """gRPC backend transport for AutoscalingPolicyService. - - The API interface for managing autoscaling policies in the - Cloud Dataproc API. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. 
- credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def create_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.CreateAutoscalingPolicyRequest], - autoscaling_policies.AutoscalingPolicy]: - r"""Return a callable for the create autoscaling policy method over gRPC. - - Creates new autoscaling policy. - - Returns: - Callable[[~.CreateAutoscalingPolicyRequest], - ~.AutoscalingPolicy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_autoscaling_policy' not in self._stubs: - self._stubs['create_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/CreateAutoscalingPolicy', - request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['create_autoscaling_policy'] - - @property - def update_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.UpdateAutoscalingPolicyRequest], - autoscaling_policies.AutoscalingPolicy]: - r"""Return a callable for the update autoscaling policy method over gRPC. - - Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. - - Returns: - Callable[[~.UpdateAutoscalingPolicyRequest], - ~.AutoscalingPolicy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_autoscaling_policy' not in self._stubs: - self._stubs['update_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/UpdateAutoscalingPolicy', - request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['update_autoscaling_policy'] - - @property - def get_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.GetAutoscalingPolicyRequest], - autoscaling_policies.AutoscalingPolicy]: - r"""Return a callable for the get autoscaling policy method over gRPC. - - Retrieves autoscaling policy. - - Returns: - Callable[[~.GetAutoscalingPolicyRequest], - ~.AutoscalingPolicy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_autoscaling_policy' not in self._stubs: - self._stubs['get_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/GetAutoscalingPolicy', - request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['get_autoscaling_policy'] - - @property - def list_autoscaling_policies(self) -> Callable[ - [autoscaling_policies.ListAutoscalingPoliciesRequest], - autoscaling_policies.ListAutoscalingPoliciesResponse]: - r"""Return a callable for the list autoscaling policies method over gRPC. - - Lists autoscaling policies in the project. - - Returns: - Callable[[~.ListAutoscalingPoliciesRequest], - ~.ListAutoscalingPoliciesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_autoscaling_policies' not in self._stubs: - self._stubs['list_autoscaling_policies'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/ListAutoscalingPolicies', - request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, - response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, - ) - return self._stubs['list_autoscaling_policies'] - - @property - def delete_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.DeleteAutoscalingPolicyRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete autoscaling policy method over gRPC. - - Deletes an autoscaling policy. It is an error to - delete an autoscaling policy that is in use by one or - more clusters. - - Returns: - Callable[[~.DeleteAutoscalingPolicyRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_autoscaling_policy' not in self._stubs: - self._stubs['delete_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/DeleteAutoscalingPolicy', - request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_autoscaling_policy'] - - -__all__ = ( - 'AutoscalingPolicyServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py deleted file mode 100644 index a7fc6b5c..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,367 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.dataproc_v1beta2.types import autoscaling_policies -from google.protobuf import empty_pb2 # type: ignore -from .base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import AutoscalingPolicyServiceGrpcTransport - - -class AutoscalingPolicyServiceGrpcAsyncIOTransport(AutoscalingPolicyServiceTransport): - """gRPC AsyncIO backend transport for AutoscalingPolicyService. - - The API interface for managing autoscaling policies in the - Cloud Dataproc API. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
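Because the transport accepts a ready-made channel and, as its docstring notes, ignores credentials when one is supplied, a caller can build the channel with the same helper and hand it in. A hedged sketch under that assumption (endpoint and scope are the defaults shown in this diff; credentials come from the environment):

from google.api_core import grpc_helpers
from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.grpc import (
    AutoscalingPolicyServiceGrpcTransport,
)

# Build a channel explicitly; Application Default Credentials are used.
channel = grpc_helpers.create_channel(
    "dataproc.googleapis.com:443",
    scopes=["https://www.googleapis.com/auth/cloud-platform"],
)

# Passing a channel makes the transport skip its own credential handling.
transport = AutoscalingPolicyServiceGrpcTransport(channel=channel)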
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. 
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def create_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.CreateAutoscalingPolicyRequest], - Awaitable[autoscaling_policies.AutoscalingPolicy]]: - r"""Return a callable for the create autoscaling policy method over gRPC. - - Creates new autoscaling policy. - - Returns: - Callable[[~.CreateAutoscalingPolicyRequest], - Awaitable[~.AutoscalingPolicy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_autoscaling_policy' not in self._stubs: - self._stubs['create_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/CreateAutoscalingPolicy', - request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['create_autoscaling_policy'] - - @property - def update_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.UpdateAutoscalingPolicyRequest], - Awaitable[autoscaling_policies.AutoscalingPolicy]]: - r"""Return a callable for the update autoscaling policy method over gRPC. - - Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. - - Returns: - Callable[[~.UpdateAutoscalingPolicyRequest], - Awaitable[~.AutoscalingPolicy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_autoscaling_policy' not in self._stubs: - self._stubs['update_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/UpdateAutoscalingPolicy', - request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['update_autoscaling_policy'] - - @property - def get_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.GetAutoscalingPolicyRequest], - Awaitable[autoscaling_policies.AutoscalingPolicy]]: - r"""Return a callable for the get autoscaling policy method over gRPC. - - Retrieves autoscaling policy. - - Returns: - Callable[[~.GetAutoscalingPolicyRequest], - Awaitable[~.AutoscalingPolicy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_autoscaling_policy' not in self._stubs: - self._stubs['get_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/GetAutoscalingPolicy', - request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, - response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, - ) - return self._stubs['get_autoscaling_policy'] - - @property - def list_autoscaling_policies(self) -> Callable[ - [autoscaling_policies.ListAutoscalingPoliciesRequest], - Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse]]: - r"""Return a callable for the list autoscaling policies method over gRPC. - - Lists autoscaling policies in the project. - - Returns: - Callable[[~.ListAutoscalingPoliciesRequest], - Awaitable[~.ListAutoscalingPoliciesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_autoscaling_policies' not in self._stubs: - self._stubs['list_autoscaling_policies'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/ListAutoscalingPolicies', - request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, - response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, - ) - return self._stubs['list_autoscaling_policies'] - - @property - def delete_autoscaling_policy(self) -> Callable[ - [autoscaling_policies.DeleteAutoscalingPolicyRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete autoscaling policy method over gRPC. - - Deletes an autoscaling policy. It is an error to - delete an autoscaling policy that is in use by one or - more clusters. - - Returns: - Callable[[~.DeleteAutoscalingPolicyRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_autoscaling_policy' not in self._stubs: - self._stubs['delete_autoscaling_policy'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/DeleteAutoscalingPolicy', - request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_autoscaling_policy'] - - -__all__ = ( - 'AutoscalingPolicyServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py deleted file mode 100644 index 4b4a11d5..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import ClusterControllerClient -from .async_client import ClusterControllerAsyncClient - -__all__ = ( - 'ClusterControllerClient', - 'ClusterControllerAsyncClient', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py deleted file mode 100644 index 4921e9e6..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py +++ /dev/null @@ -1,923 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1beta2.services.cluster_controller import pagers -from google.cloud.dataproc_v1beta2.types import clusters -from google.cloud.dataproc_v1beta2.types import operations -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import ClusterControllerTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport -from .client import ClusterControllerClient - - -class ClusterControllerAsyncClient: - """The ClusterControllerService provides methods to manage - clusters of Compute Engine instances. - """ - - _client: ClusterControllerClient - - DEFAULT_ENDPOINT = ClusterControllerClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = ClusterControllerClient.DEFAULT_MTLS_ENDPOINT - - cluster_path = staticmethod(ClusterControllerClient.cluster_path) - parse_cluster_path = staticmethod(ClusterControllerClient.parse_cluster_path) - common_billing_account_path = staticmethod(ClusterControllerClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(ClusterControllerClient.parse_common_billing_account_path) - common_folder_path = staticmethod(ClusterControllerClient.common_folder_path) - parse_common_folder_path = staticmethod(ClusterControllerClient.parse_common_folder_path) - common_organization_path = staticmethod(ClusterControllerClient.common_organization_path) - parse_common_organization_path = staticmethod(ClusterControllerClient.parse_common_organization_path) - common_project_path = staticmethod(ClusterControllerClient.common_project_path) - parse_common_project_path = staticmethod(ClusterControllerClient.parse_common_project_path) - common_location_path = staticmethod(ClusterControllerClient.common_location_path) - parse_common_location_path = staticmethod(ClusterControllerClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterControllerAsyncClient: The constructed client. 
- """ - return ClusterControllerClient.from_service_account_info.__func__(ClusterControllerAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterControllerAsyncClient: The constructed client. - """ - return ClusterControllerClient.from_service_account_file.__func__(ClusterControllerAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ClusterControllerTransport: - """Returns the transport used by the client instance. - - Returns: - ClusterControllerTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(ClusterControllerClient).get_transport_class, type(ClusterControllerClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, ClusterControllerTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the cluster controller client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.ClusterControllerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = ClusterControllerClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_cluster(self, - request: clusters.CreateClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster: clusters.Cluster = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a cluster in a project. 
The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.CreateClusterRequest`): - The request object. A request to create a cluster. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (:class:`google.cloud.dataproc_v1beta2.types.Cluster`): - Required. The cluster to create. - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.CreateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster is not None: - request.cluster = cluster - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - async def update_cluster(self, - request: clusters.UpdateClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - cluster: clusters.Cluster = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a cluster in a project. 
The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.UpdateClusterRequest`): - The request object. A request to update a cluster. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project the cluster belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (:class:`str`): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (:class:`google.cloud.dataproc_v1beta2.types.Cluster`): - Required. The changes to the cluster. - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Specifies the path, relative to ``Cluster``, - of the field to update. For example, to change the - number of workers in a cluster to 5, the ``update_mask`` - parameter would be specified as - ``config.worker_config.num_instances``, and the - ``PATCH`` request body would specify the new value, as - follows: - - :: - - { - "config":{ - "workerConfig":{ - "numInstances":"5" - } - } - } - - Similarly, to change the number of preemptible workers - in a cluster to 5, the ``update_mask`` parameter would - be ``config.secondary_worker_config.num_instances``, and - the ``PATCH`` request body would be set as follows: - - :: - - { - "config":{ - "secondaryWorkerConfig":{ - "numInstances":"5" - } - } - } - - Note: currently only the following fields can be - updated: - - .. raw:: html - - - - - - - - - - - - - - - - - - - - - - - - - - -
 Mask                                            Purpose
 labels                                          Updates labels
 config.worker_config.num_instances              Resize primary worker group
 config.secondary_worker_config.num_instances    Resize secondary worker group
 config.lifecycle_config.auto_delete_ttl         Reset MAX TTL duration
 config.lifecycle_config.auto_delete_time        Update MAX TTL deletion timestamp
 config.lifecycle_config.idle_delete_ttl         Update Idle TTL duration
 config.autoscaling_config.policy_uri            Use, stop using, or change autoscaling policies
- - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name, cluster, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.UpdateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - if cluster is not None: - request.cluster = cluster - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - async def delete_cluster(self, - request: clusters.DeleteClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.DeleteClusterRequest`): - The request object. A request to delete a cluster. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (:class:`str`): - Required. 
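As a concrete illustration of the worker-resize case in the table above, a hedged sketch of calling update_cluster with a FieldMask selecting config.worker_config.num_instances (project, region, and cluster names are placeholders; async variant):

from google.cloud import dataproc_v1beta2
from google.protobuf import field_mask_pb2


async def resize_workers(client: dataproc_v1beta2.ClusterControllerAsyncClient):
    operation = await client.update_cluster(
        project_id="my-project",      # placeholder
        region="us-central1",         # placeholder
        cluster_name="my-cluster",    # placeholder
        cluster=dataproc_v1beta2.Cluster(
            config=dataproc_v1beta2.ClusterConfig(
                worker_config=dataproc_v1beta2.InstanceGroupConfig(num_instances=5),
            ),
        ),
        update_mask=field_mask_pb2.FieldMask(
            paths=["config.worker_config.num_instances"]
        ),
    )
    # Wait for the long-running operation to finish and return the cluster.
    return await operation.result()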
The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.DeleteClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_cluster(self, - request: clusters.GetClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> clusters.Cluster: - r"""Gets the resource representation for a cluster in a - project. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.GetClusterRequest`): - The request object. Request to get the resource - representation for a cluster in a project. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. 
- - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (:class:`str`): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.Cluster: - Describes the identifying - information, config, and status of a - cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.GetClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_clusters(self, - request: clusters.ListClustersRequest = None, - *, - project_id: str = None, - region: str = None, - filter: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListClustersAsyncPager: - r"""Lists all regions/{region}/clusters in a project - alphabetically. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.ListClustersRequest`): - The request object. A request to list the clusters in a - project. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - filter (:class:`str`): - Optional. A filter constraining the clusters to list. - Filters are case-sensitive and have the following - syntax: - - field = value [AND [field = value]] ... - - where **field** is one of ``status.state``, - ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a - label key. **value** can be ``*`` to match all values. 
- ``status.state`` can be one of the following: - ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, - ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` - contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` - states. ``INACTIVE`` contains the ``DELETING`` and - ``ERROR`` states. ``clusterName`` is the name of the - cluster provided at creation time. Only the logical - ``AND`` operator is supported; space-separated items are - treated as having an implicit ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND clusterName = mycluster AND - labels.env = staging AND labels.starred = \* - - This corresponds to the ``filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.services.cluster_controller.pagers.ListClustersAsyncPager: - The list of all clusters in a - project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, filter]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.ListClustersRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if filter is not None: - request.filter = filter - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_clusters, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListClustersAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def diagnose_cluster(self, - request: clusters.DiagnoseClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Gets cluster diagnostic information. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - After the operation completes, - [Operation.response][google.longrunning.Operation.response] - contains [Empty][google.protobuf.Empty]. 
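A hedged sketch of the filter syntax spelled out above, using the async pager (project and region are placeholders):

from google.cloud import dataproc_v1beta2


async def active_staging_clusters(client: dataproc_v1beta2.ClusterControllerAsyncClient):
    pager = await client.list_clusters(
        project_id="my-project",   # placeholder
        region="us-central1",      # placeholder
        filter="status.state = ACTIVE AND labels.env = staging",
    )
    # The pager resolves additional pages transparently.
    return [cluster.cluster_name async for cluster in pager]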
- - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.DiagnoseClusterRequest`): - The request object. A request to collect cluster - diagnostic information. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (:class:`str`): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = clusters.DiagnoseClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.diagnose_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. 
- return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ClusterControllerAsyncClient", -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py deleted file mode 100644 index dbfd3969..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py +++ /dev/null @@ -1,1070 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1beta2.services.cluster_controller import pagers -from google.cloud.dataproc_v1beta2.types import clusters -from google.cloud.dataproc_v1beta2.types import operations -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import ClusterControllerTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import ClusterControllerGrpcTransport -from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport - - -class ClusterControllerClientMeta(type): - """Metaclass for the ClusterController client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[ClusterControllerTransport]] - _transport_registry["grpc"] = ClusterControllerGrpcTransport - _transport_registry["grpc_asyncio"] = ClusterControllerGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[ClusterControllerTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. 
- """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class ClusterControllerClient(metaclass=ClusterControllerClientMeta): - """The ClusterControllerService provides methods to manage - clusters of Compute Engine instances. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "dataproc.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ClusterControllerTransport: - """Returns the transport used by the client instance. - - Returns: - ClusterControllerTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def cluster_path(project: str,location: str,cluster: str,) -> str: - """Returns a fully-qualified cluster string.""" - return "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) - - @staticmethod - def parse_cluster_path(path: str) -> Dict[str,str]: - """Parses a cluster path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/clusters/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, ClusterControllerTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the cluster controller client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ClusterControllerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, ClusterControllerTransport): - # transport is a ClusterControllerTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_cluster(self, - request: clusters.CreateClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster: clusters.Cluster = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Creates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (google.cloud.dataproc_v1beta2.types.CreateClusterRequest): - The request object. A request to create a cluster. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (google.cloud.dataproc_v1beta2.types.Cluster): - Required. The cluster to create. - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.CreateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.CreateClusterRequest): - request = clusters.CreateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster is not None: - request.cluster = cluster - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_cluster] - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - def update_cluster(self, - request: clusters.UpdateClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - cluster: clusters.Cluster = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Updates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (google.cloud.dataproc_v1beta2.types.UpdateClusterRequest): - The request object. A request to update a cluster. - project_id (str): - Required. The ID of the Google Cloud - Platform project the cluster belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (str): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (google.cloud.dataproc_v1beta2.types.Cluster): - Required. The changes to the cluster. - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Specifies the path, relative to ``Cluster``, - of the field to update. For example, to change the - number of workers in a cluster to 5, the ``update_mask`` - parameter would be specified as - ``config.worker_config.num_instances``, and the - ``PATCH`` request body would specify the new value, as - follows: - - :: - - { - "config":{ - "workerConfig":{ - "numInstances":"5" - } - } - } - - Similarly, to change the number of preemptible workers - in a cluster to 5, the ``update_mask`` parameter would - be ``config.secondary_worker_config.num_instances``, and - the ``PATCH`` request body would be set as follows: - - :: - - { - "config":{ - "secondaryWorkerConfig":{ - "numInstances":"5" - } - } - } - - Note: currently only the following fields can be - updated: - - .. raw:: html - - - - - - - - - - - - - - - - - - - - - - - - - - -
-                =============================================  ===============================================
-                Mask                                           Purpose
-                =============================================  ===============================================
-                labels                                         Updates labels
-                config.worker_config.num_instances             Resize primary worker group
-                config.secondary_worker_config.num_instances   Resize secondary worker group
-                config.lifecycle_config.auto_delete_ttl        Reset MAX TTL duration
-                config.lifecycle_config.auto_delete_time       Update MAX TTL deletion timestamp
-                config.lifecycle_config.idle_delete_ttl        Update Idle TTL duration
-                config.autoscaling_config.policy_uri           Use, stop using, or change autoscaling policies
-                =============================================  ===============================================
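# Illustrative sketch: resizing the primary worker group to 5 instances through
# the flattened ``update_cluster`` signature, using the
# ``config.worker_config.num_instances`` mask from the table above. Assumes
# Application Default Credentials; project, region, and cluster names are
# hypothetical placeholders.
from google.cloud import dataproc_v1beta2
from google.protobuf import field_mask_pb2

client = dataproc_v1beta2.ClusterControllerClient()

operation = client.update_cluster(
    project_id="my-project",    # placeholder
    region="global",            # placeholder
    cluster_name="my-cluster",  # placeholder
    cluster=dataproc_v1beta2.Cluster(
        config={"worker_config": {"num_instances": 5}},
    ),
    update_mask=field_mask_pb2.FieldMask(
        paths=["config.worker_config.num_instances"],
    ),
)
updated = operation.result()  # blocks until the long-running operation finishes
print(updated.status.state)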
- - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name, cluster, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.UpdateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.UpdateClusterRequest): - request = clusters.UpdateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - if cluster is not None: - request.cluster = cluster - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_cluster] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - clusters.Cluster, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - def delete_cluster(self, - request: clusters.DeleteClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Deletes a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Args: - request (google.cloud.dataproc_v1beta2.types.DeleteClusterRequest): - The request object. A request to delete a cluster. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (str): - Required. The cluster name. 
- This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.DeleteClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.DeleteClusterRequest): - request = clusters.DeleteClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_cluster] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. - return response - - def get_cluster(self, - request: clusters.GetClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> clusters.Cluster: - r"""Gets the resource representation for a cluster in a - project. - - Args: - request (google.cloud.dataproc_v1beta2.types.GetClusterRequest): - The request object. Request to get the resource - representation for a cluster in a project. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (str): - Required. The cluster name. 
- This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.Cluster: - Describes the identifying - information, config, and status of a - cluster of Compute Engine instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.GetClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.GetClusterRequest): - request = clusters.GetClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_cluster] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_clusters(self, - request: clusters.ListClustersRequest = None, - *, - project_id: str = None, - region: str = None, - filter: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListClustersPager: - r"""Lists all regions/{region}/clusters in a project - alphabetically. - - Args: - request (google.cloud.dataproc_v1beta2.types.ListClustersRequest): - The request object. A request to list the clusters in a - project. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - filter (str): - Optional. A filter constraining the clusters to list. - Filters are case-sensitive and have the following - syntax: - - field = value [AND [field = value]] ... - - where **field** is one of ``status.state``, - ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a - label key. **value** can be ``*`` to match all values. - ``status.state`` can be one of the following: - ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, - ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` - contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` - states. ``INACTIVE`` contains the ``DELETING`` and - ``ERROR`` states. 
``clusterName`` is the name of the - cluster provided at creation time. Only the logical - ``AND`` operator is supported; space-separated items are - treated as having an implicit ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND clusterName = mycluster AND - labels.env = staging AND labels.starred = \* - - This corresponds to the ``filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.services.cluster_controller.pagers.ListClustersPager: - The list of all clusters in a - project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, filter]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.ListClustersRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.ListClustersRequest): - request = clusters.ListClustersRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if filter is not None: - request.filter = filter - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_clusters] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListClustersPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def diagnose_cluster(self, - request: clusters.DiagnoseClusterRequest = None, - *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Gets cluster diagnostic information. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - After the operation completes, - [Operation.response][google.longrunning.Operation.response] - contains [Empty][google.protobuf.Empty]. - - Args: - request (google.cloud.dataproc_v1beta2.types.DiagnoseClusterRequest): - The request object. A request to collect cluster - diagnostic information. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the cluster - belongs to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. 
The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_name (str): - Required. The cluster name. - This corresponds to the ``cluster_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, cluster_name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a clusters.DiagnoseClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, clusters.DiagnoseClusterRequest): - request = clusters.DiagnoseClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if cluster_name is not None: - request.cluster_name = cluster_name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.diagnose_cluster] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.ClusterOperationMetadata, - ) - - # Done; return the response. 
- return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ClusterControllerClient", -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py deleted file mode 100644 index 10f0cc72..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.dataproc_v1beta2.types import clusters - - -class ListClustersPager: - """A pager for iterating through ``list_clusters`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` object, and - provides an ``__iter__`` method to iterate through its - ``clusters`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListClusters`` requests and continue to iterate - through the ``clusters`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., clusters.ListClustersResponse], - request: clusters.ListClustersRequest, - response: clusters.ListClustersResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1beta2.types.ListClustersRequest): - The initial request object. - response (google.cloud.dataproc_v1beta2.types.ListClustersResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
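# Illustrative sketch: ``diagnose_cluster`` returns a long-running operation
# whose result type is ``Empty`` and whose metadata is
# ``ClusterOperationMetadata``, as wired up in the client code above. Assumes
# Application Default Credentials; identifiers are hypothetical placeholders.
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.ClusterControllerClient()
operation = client.diagnose_cluster(
    project_id="my-project",    # placeholder
    region="global",            # placeholder
    cluster_name="my-cluster",  # placeholder
)
operation.result()                      # blocks; the response payload is Empty
print(operation.metadata.cluster_name)  # ClusterOperationMetadata field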
- """ - self._method = method - self._request = clusters.ListClustersRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[clusters.ListClustersResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[clusters.Cluster]: - for page in self.pages: - yield from page.clusters - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListClustersAsyncPager: - """A pager for iterating through ``list_clusters`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``clusters`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListClusters`` requests and continue to iterate - through the ``clusters`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[clusters.ListClustersResponse]], - request: clusters.ListClustersRequest, - response: clusters.ListClustersResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1beta2.types.ListClustersRequest): - The initial request object. - response (google.cloud.dataproc_v1beta2.types.ListClustersResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = clusters.ListClustersRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[clusters.ListClustersResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[clusters.Cluster]: - async def async_generator(): - async for page in self.pages: - for response in page.clusters: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py deleted file mode 100644 index 9c44d271..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import ClusterControllerTransport -from .grpc import ClusterControllerGrpcTransport -from .grpc_asyncio import ClusterControllerGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterControllerTransport]] -_transport_registry['grpc'] = ClusterControllerGrpcTransport -_transport_registry['grpc_asyncio'] = ClusterControllerGrpcAsyncIOTransport - -__all__ = ( - 'ClusterControllerTransport', - 'ClusterControllerGrpcTransport', - 'ClusterControllerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py deleted file mode 100644 index d4a71b3e..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py +++ /dev/null @@ -1,285 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1beta2.types import clusters -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-dataproc', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class ClusterControllerTransport(abc.ABC): - """Abstract transport class for ClusterController.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'dataproc.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_cluster: gapic_v1.method.wrap_method( - self.create_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - self.update_cluster: gapic_v1.method.wrap_method( - self.update_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - self.delete_cluster: gapic_v1.method.wrap_method( - self.delete_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - self.get_cluster: gapic_v1.method.wrap_method( - self.get_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - self.list_clusters: gapic_v1.method.wrap_method( - self.list_clusters, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - self.diagnose_cluster: gapic_v1.method.wrap_method( - self.diagnose_cluster, - 
default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=300.0, - ), - default_timeout=300.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_cluster(self) -> Callable[ - [clusters.CreateClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def update_cluster(self) -> Callable[ - [clusters.UpdateClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_cluster(self) -> Callable[ - [clusters.DeleteClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_cluster(self) -> Callable[ - [clusters.GetClusterRequest], - Union[ - clusters.Cluster, - Awaitable[clusters.Cluster] - ]]: - raise NotImplementedError() - - @property - def list_clusters(self) -> Callable[ - [clusters.ListClustersRequest], - Union[ - clusters.ListClustersResponse, - Awaitable[clusters.ListClustersResponse] - ]]: - raise NotImplementedError() - - @property - def diagnose_cluster(self) -> Callable[ - [clusters.DiagnoseClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'ClusterControllerTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py deleted file mode 100644 index c7429251..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py +++ /dev/null @@ -1,419 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.dataproc_v1beta2.types import clusters -from google.longrunning import operations_pb2 # type: ignore -from .base import ClusterControllerTransport, DEFAULT_CLIENT_INFO - - -class ClusterControllerGrpcTransport(ClusterControllerTransport): - """gRPC backend transport for ClusterController. - - The ClusterControllerService provides methods to manage - clusters of Compute Engine instances. 
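# Illustrative sketch: every RPC accepts per-call ``retry`` and ``timeout``
# arguments that override the defaults configured in ``_prep_wrapped_messages``
# above (exponential backoff from 0.1s up to 60s, multiplier 1.3, 300s
# deadline). Assumes Application Default Credentials; identifiers are
# hypothetical placeholders.
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.ClusterControllerClient()
cluster = client.get_cluster(
    project_id="my-project",    # placeholder
    region="global",            # placeholder
    cluster_name="my-cluster",  # placeholder
    retry=retries.Retry(
        initial=1.0,
        maximum=30.0,
        multiplier=2.0,
        deadline=120.0,
        predicate=retries.if_exception_type(core_exceptions.ServiceUnavailable),
    ),
    timeout=120.0,
)
print(cluster.status.state)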
- - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_cluster(self) -> Callable[ - [clusters.CreateClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.CreateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_cluster' not in self._stubs: - self._stubs['create_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster', - request_serializer=clusters.CreateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_cluster'] - - @property - def update_cluster(self) -> Callable[ - [clusters.UpdateClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the update cluster method over gRPC. - - Updates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.UpdateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_cluster' not in self._stubs: - self._stubs['update_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster', - request_serializer=clusters.UpdateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_cluster'] - - @property - def delete_cluster(self) -> Callable[ - [clusters.DeleteClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete cluster method over gRPC. - - Deletes a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.DeleteClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_cluster' not in self._stubs: - self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster', - request_serializer=clusters.DeleteClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_cluster'] - - @property - def get_cluster(self) -> Callable[ - [clusters.GetClusterRequest], - clusters.Cluster]: - r"""Return a callable for the get cluster method over gRPC. - - Gets the resource representation for a cluster in a - project. - - Returns: - Callable[[~.GetClusterRequest], - ~.Cluster]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_cluster' not in self._stubs: - self._stubs['get_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/GetCluster', - request_serializer=clusters.GetClusterRequest.serialize, - response_deserializer=clusters.Cluster.deserialize, - ) - return self._stubs['get_cluster'] - - @property - def list_clusters(self) -> Callable[ - [clusters.ListClustersRequest], - clusters.ListClustersResponse]: - r"""Return a callable for the list clusters method over gRPC. - - Lists all regions/{region}/clusters in a project - alphabetically. - - Returns: - Callable[[~.ListClustersRequest], - ~.ListClustersResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_clusters' not in self._stubs: - self._stubs['list_clusters'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/ListClusters', - request_serializer=clusters.ListClustersRequest.serialize, - response_deserializer=clusters.ListClustersResponse.deserialize, - ) - return self._stubs['list_clusters'] - - @property - def diagnose_cluster(self) -> Callable[ - [clusters.DiagnoseClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the diagnose cluster method over gRPC. - - Gets cluster diagnostic information. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - After the operation completes, - [Operation.response][google.longrunning.Operation.response] - contains [Empty][google.protobuf.Empty]. - - Returns: - Callable[[~.DiagnoseClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'diagnose_cluster' not in self._stubs: - self._stubs['diagnose_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster', - request_serializer=clusters.DiagnoseClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['diagnose_cluster'] - - -__all__ = ( - 'ClusterControllerGrpcTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py deleted file mode 100644 index f4445c89..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py +++ /dev/null @@ -1,423 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.dataproc_v1beta2.types import clusters -from google.longrunning import operations_pb2 # type: ignore -from .base import ClusterControllerTransport, DEFAULT_CLIENT_INFO -from .grpc import ClusterControllerGrpcTransport - - -class ClusterControllerGrpcAsyncIOTransport(ClusterControllerTransport): - """gRPC AsyncIO backend transport for ClusterController. - - The ClusterControllerService provides methods to manage - clusters of Compute Engine instances. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. 
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_cluster(self) -> Callable[ - [clusters.CreateClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster in a project. 
The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.CreateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_cluster' not in self._stubs: - self._stubs['create_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster', - request_serializer=clusters.CreateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_cluster'] - - @property - def update_cluster(self) -> Callable[ - [clusters.UpdateClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update cluster method over gRPC. - - Updates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.UpdateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_cluster' not in self._stubs: - self._stubs['update_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster', - request_serializer=clusters.UpdateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_cluster'] - - @property - def delete_cluster(self) -> Callable[ - [clusters.DeleteClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete cluster method over gRPC. - - Deletes a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - - Returns: - Callable[[~.DeleteClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_cluster' not in self._stubs: - self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster', - request_serializer=clusters.DeleteClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_cluster'] - - @property - def get_cluster(self) -> Callable[ - [clusters.GetClusterRequest], - Awaitable[clusters.Cluster]]: - r"""Return a callable for the get cluster method over gRPC. - - Gets the resource representation for a cluster in a - project. - - Returns: - Callable[[~.GetClusterRequest], - Awaitable[~.Cluster]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_cluster' not in self._stubs: - self._stubs['get_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/GetCluster', - request_serializer=clusters.GetClusterRequest.serialize, - response_deserializer=clusters.Cluster.deserialize, - ) - return self._stubs['get_cluster'] - - @property - def list_clusters(self) -> Callable[ - [clusters.ListClustersRequest], - Awaitable[clusters.ListClustersResponse]]: - r"""Return a callable for the list clusters method over gRPC. - - Lists all regions/{region}/clusters in a project - alphabetically. - - Returns: - Callable[[~.ListClustersRequest], - Awaitable[~.ListClustersResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_clusters' not in self._stubs: - self._stubs['list_clusters'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/ListClusters', - request_serializer=clusters.ListClustersRequest.serialize, - response_deserializer=clusters.ListClustersResponse.deserialize, - ) - return self._stubs['list_clusters'] - - @property - def diagnose_cluster(self) -> Callable[ - [clusters.DiagnoseClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the diagnose cluster method over gRPC. - - Gets cluster diagnostic information. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will - be - `ClusterOperationMetadata `__. - After the operation completes, - [Operation.response][google.longrunning.Operation.response] - contains [Empty][google.protobuf.Empty]. - - Returns: - Callable[[~.DiagnoseClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'diagnose_cluster' not in self._stubs: - self._stubs['diagnose_cluster'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster', - request_serializer=clusters.DiagnoseClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['diagnose_cluster'] - - -__all__ = ( - 'ClusterControllerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/__init__.py deleted file mode 100644 index 19ac5a98..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import JobControllerClient -from .async_client import JobControllerAsyncClient - -__all__ = ( - 'JobControllerClient', - 'JobControllerAsyncClient', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py deleted file mode 100644 index 15c53d3e..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py +++ /dev/null @@ -1,796 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1beta2.services.job_controller import pagers -from google.cloud.dataproc_v1beta2.types import jobs -from .transports.base import JobControllerTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport -from .client import JobControllerClient - - -class JobControllerAsyncClient: - """The JobController provides methods to manage jobs.""" - - _client: JobControllerClient - - DEFAULT_ENDPOINT = JobControllerClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = JobControllerClient.DEFAULT_MTLS_ENDPOINT - - common_billing_account_path = staticmethod(JobControllerClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(JobControllerClient.parse_common_billing_account_path) - common_folder_path = staticmethod(JobControllerClient.common_folder_path) - parse_common_folder_path = staticmethod(JobControllerClient.parse_common_folder_path) - common_organization_path = staticmethod(JobControllerClient.common_organization_path) - parse_common_organization_path = staticmethod(JobControllerClient.parse_common_organization_path) - common_project_path = staticmethod(JobControllerClient.common_project_path) - parse_common_project_path = staticmethod(JobControllerClient.parse_common_project_path) - common_location_path = staticmethod(JobControllerClient.common_location_path) - parse_common_location_path = staticmethod(JobControllerClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. 
- args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobControllerAsyncClient: The constructed client. - """ - return JobControllerClient.from_service_account_info.__func__(JobControllerAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobControllerAsyncClient: The constructed client. - """ - return JobControllerClient.from_service_account_file.__func__(JobControllerAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> JobControllerTransport: - """Returns the transport used by the client instance. - - Returns: - JobControllerTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(JobControllerClient).get_transport_class, type(JobControllerClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, JobControllerTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the job controller client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.JobControllerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - self._client = JobControllerClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def submit_job(self, - request: jobs.SubmitJobRequest = None, - *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Submits a job to a cluster. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.SubmitJobRequest`): - The request object. A request to submit a job. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job (:class:`google.cloud.dataproc_v1beta2.types.Job`): - Required. The job resource. - This corresponds to the ``job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.SubmitJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job is not None: - request.job = job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.submit_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def submit_job_as_operation(self, - request: jobs.SubmitJobRequest = None, - *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Submits job to a cluster. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.SubmitJobRequest`): - The request object. A request to submit a job. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. 
- - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job (:class:`google.cloud.dataproc_v1beta2.types.Job`): - Required. The job resource. - This corresponds to the ``job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.dataproc_v1beta2.types.Job` A - Dataproc job resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.SubmitJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job is not None: - request.job = job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.submit_job_as_operation, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - jobs.Job, - metadata_type=jobs.JobMetadata, - ) - - # Done; return the response. - return response - - async def get_job(self, - request: jobs.GetJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Gets the resource representation for a job in a - project. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.GetJobRequest`): - The request object. A request to get the resource - representation for a job in a project. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- job_id (:class:`str`): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.GetJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_jobs(self, - request: jobs.ListJobsRequest = None, - *, - project_id: str = None, - region: str = None, - filter: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListJobsAsyncPager: - r"""Lists regions/{region}/jobs in a project. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.ListJobsRequest`): - The request object. A request to list jobs in a project. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - filter (:class:`str`): - Optional. A filter constraining the jobs to list. - Filters are case-sensitive and have the following - syntax: - - [field = value] AND [field [= value]] ... - - where **field** is ``status.state`` or ``labels.[KEY]``, - and ``[KEY]`` is a label key. **value** can be ``*`` to - match all values. ``status.state`` can be either - ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` - operator is supported; space-separated items are treated - as having an implicit ``AND`` operator. 
- - Example filter: - - status.state = ACTIVE AND labels.env = staging AND - labels.starred = \* - - This corresponds to the ``filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.services.job_controller.pagers.ListJobsAsyncPager: - A list of jobs in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, filter]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.ListJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if filter is not None: - request.filter = filter - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_jobs, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_job(self, - request: jobs.UpdateJobRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Updates a job in a project. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.UpdateJobRequest`): - The request object. A request to update a job. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - request = jobs.UpdateJobRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def cancel_job(self, - request: jobs.CancelJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.CancelJobRequest`): - The request object. A request to cancel a job. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job_id (:class:`str`): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.CancelJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_job(self, - request: jobs.DeleteJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes the job from the project. If the job is active, the - delete fails, and the response returns ``FAILED_PRECONDITION``. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.DeleteJobRequest`): - The request object. A request to delete a job. - project_id (:class:`str`): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (:class:`str`): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job_id (:class:`str`): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = jobs.DeleteJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Send the request. 
- await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobControllerAsyncClient", -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/client.py deleted file mode 100644 index 024cf2fd..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/client.py +++ /dev/null @@ -1,927 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1beta2.services.job_controller import pagers -from google.cloud.dataproc_v1beta2.types import jobs -from .transports.base import JobControllerTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import JobControllerGrpcTransport -from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport - - -class JobControllerClientMeta(type): - """Metaclass for the JobController client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[JobControllerTransport]] - _transport_registry["grpc"] = JobControllerGrpcTransport - _transport_registry["grpc_asyncio"] = JobControllerGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[JobControllerTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. 
- if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class JobControllerClient(metaclass=JobControllerClientMeta): - """The JobController provides methods to manage jobs.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "dataproc.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> JobControllerTransport: - """Returns the transport used by the client instance. - - Returns: - JobControllerTransport: The transport used by the client - instance.
- """ - return self._transport - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, JobControllerTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the job controller client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, JobControllerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). 
However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, JobControllerTransport): - # transport is a JobControllerTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def submit_job(self, - request: jobs.SubmitJobRequest = None, - *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Submits a job to a cluster. - - Args: - request (google.cloud.dataproc_v1beta2.types.SubmitJobRequest): - The request object. 
A request to submit a job. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job (google.cloud.dataproc_v1beta2.types.Job): - Required. The job resource. - This corresponds to the ``job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.SubmitJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.SubmitJobRequest): - request = jobs.SubmitJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job is not None: - request.job = job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.submit_job] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def submit_job_as_operation(self, - request: jobs.SubmitJobRequest = None, - *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Submits job to a cluster. - - Args: - request (google.cloud.dataproc_v1beta2.types.SubmitJobRequest): - The request object. A request to submit a job. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job (google.cloud.dataproc_v1beta2.types.Job): - Required. The job resource. - This corresponds to the ``job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.dataproc_v1beta2.types.Job` A - Dataproc job resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.SubmitJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.SubmitJobRequest): - request = jobs.SubmitJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job is not None: - request.job = job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.submit_job_as_operation] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - jobs.Job, - metadata_type=jobs.JobMetadata, - ) - - # Done; return the response. - return response - - def get_job(self, - request: jobs.GetJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Gets the resource representation for a job in a - project. - - Args: - request (google.cloud.dataproc_v1beta2.types.GetJobRequest): - The request object. A request to get the resource - representation for a job in a project. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job_id (str): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.GetJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.GetJobRequest): - request = jobs.GetJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_job] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_jobs(self, - request: jobs.ListJobsRequest = None, - *, - project_id: str = None, - region: str = None, - filter: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListJobsPager: - r"""Lists regions/{region}/jobs in a project. - - Args: - request (google.cloud.dataproc_v1beta2.types.ListJobsRequest): - The request object. A request to list jobs in a project. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - filter (str): - Optional. A filter constraining the jobs to list. - Filters are case-sensitive and have the following - syntax: - - [field = value] AND [field [= value]] ... - - where **field** is ``status.state`` or ``labels.[KEY]``, - and ``[KEY]`` is a label key. **value** can be ``*`` to - match all values. ``status.state`` can be either - ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` - operator is supported; space-separated items are treated - as having an implicit ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND labels.env = staging AND - labels.starred = \* - - This corresponds to the ``filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.services.job_controller.pagers.ListJobsPager: - A list of jobs in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([project_id, region, filter]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.ListJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.ListJobsRequest): - request = jobs.ListJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if filter is not None: - request.filter = filter - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_jobs] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_job(self, - request: jobs.UpdateJobRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Updates a job in a project. - - Args: - request (google.cloud.dataproc_v1beta2.types.UpdateJobRequest): - The request object. A request to update a job. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a jobs.UpdateJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.UpdateJobRequest): - request = jobs.UpdateJobRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_job] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def cancel_job(self, - request: jobs.CancelJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> jobs.Job: - r"""Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Args: - request (google.cloud.dataproc_v1beta2.types.CancelJobRequest): - The request object. A request to cancel a job. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job_id (str): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.Job: - A Dataproc job resource. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.CancelJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.CancelJobRequest): - request = jobs.CancelJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_job] - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_job(self, - request: jobs.DeleteJobRequest = None, - *, - project_id: str = None, - region: str = None, - job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes the job from the project. If the job is active, the - delete fails, and the response returns ``FAILED_PRECONDITION``. - - Args: - request (google.cloud.dataproc_v1beta2.types.DeleteJobRequest): - The request object. A request to delete a job. - project_id (str): - Required. The ID of the Google Cloud - Platform project that the job belongs - to. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - region (str): - Required. The Dataproc region in - which to handle the request. - - This corresponds to the ``region`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - job_id (str): - Required. The job ID. - This corresponds to the ``job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, region, job_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a jobs.DeleteJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, jobs.DeleteJobRequest): - request = jobs.DeleteJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if region is not None: - request.region = region - if job_id is not None: - request.job_id = job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_job] - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobControllerClient", -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py deleted file mode 100644 index 9f8b6150..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.dataproc_v1beta2.types import jobs - - -class ListJobsPager: - """A pager for iterating through ``list_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListJobs`` requests and continue to iterate - through the ``jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., jobs.ListJobsResponse], - request: jobs.ListJobsRequest, - response: jobs.ListJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1beta2.types.ListJobsRequest): - The initial request object. - response (google.cloud.dataproc_v1beta2.types.ListJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = jobs.ListJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[jobs.ListJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[jobs.Job]: - for page in self.pages: - yield from page.jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListJobsAsyncPager: - """A pager for iterating through ``list_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListJobs`` requests and continue to iterate - through the ``jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[jobs.ListJobsResponse]], - request: jobs.ListJobsRequest, - response: jobs.ListJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1beta2.types.ListJobsRequest): - The initial request object. - response (google.cloud.dataproc_v1beta2.types.ListJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = jobs.ListJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[jobs.ListJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[jobs.Job]: - async def async_generator(): - async for page in self.pages: - for response in page.jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py deleted file mode 100644 index b35119f2..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import JobControllerTransport -from .grpc import JobControllerGrpcTransport -from .grpc_asyncio import JobControllerGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[JobControllerTransport]] -_transport_registry['grpc'] = JobControllerGrpcTransport -_transport_registry['grpc_asyncio'] = JobControllerGrpcAsyncIOTransport - -__all__ = ( - 'JobControllerTransport', - 'JobControllerGrpcTransport', - 'JobControllerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py deleted file mode 100644 index f6b13988..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py +++ /dev/null @@ -1,308 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1beta2.types import jobs -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-dataproc', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class JobControllerTransport(abc.ABC): - """Abstract transport class for JobController.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'dataproc.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.submit_job: gapic_v1.method.wrap_method( - self.submit_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.submit_job_as_operation: gapic_v1.method.wrap_method( - self.submit_job_as_operation, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.get_job: gapic_v1.method.wrap_method( - self.get_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.list_jobs: gapic_v1.method.wrap_method( - self.list_jobs, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.update_job: gapic_v1.method.wrap_method( - self.update_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.cancel_job: gapic_v1.method.wrap_method( - self.cancel_job, - default_retry=retries.Retry( 
-initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - self.delete_job: gapic_v1.method.wrap_method( - self.delete_job, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=900.0, - ), - default_timeout=900.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def submit_job(self) -> Callable[ - [jobs.SubmitJobRequest], - Union[ - jobs.Job, - Awaitable[jobs.Job] - ]]: - raise NotImplementedError() - - @property - def submit_job_as_operation(self) -> Callable[ - [jobs.SubmitJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_job(self) -> Callable[ - [jobs.GetJobRequest], - Union[ - jobs.Job, - Awaitable[jobs.Job] - ]]: - raise NotImplementedError() - - @property - def list_jobs(self) -> Callable[ - [jobs.ListJobsRequest], - Union[ - jobs.ListJobsResponse, - Awaitable[jobs.ListJobsResponse] - ]]: - raise NotImplementedError() - - @property - def update_job(self) -> Callable[ - [jobs.UpdateJobRequest], - Union[ - jobs.Job, - Awaitable[jobs.Job] - ]]: - raise NotImplementedError() - - @property - def cancel_job(self) -> Callable[ - [jobs.CancelJobRequest], - Union[ - jobs.Job, - Awaitable[jobs.Job] - ]]: - raise NotImplementedError() - - @property - def delete_job(self) -> Callable[ - [jobs.DeleteJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'JobControllerTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py deleted file mode 100644 index a4a34ea4..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py +++ /dev/null @@ -1,434 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.dataproc_v1beta2.types import jobs -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import JobControllerTransport, DEFAULT_CLIENT_INFO - - -class JobControllerGrpcTransport(JobControllerTransport): - """gRPC backend transport for JobController. - - The JobController provides methods to manage jobs. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. 
- scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def submit_job(self) -> Callable[ - [jobs.SubmitJobRequest], - jobs.Job]: - r"""Return a callable for the submit job method over gRPC. - - Submits a job to a cluster. - - Returns: - Callable[[~.SubmitJobRequest], - ~.Job]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'submit_job' not in self._stubs: - self._stubs['submit_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/SubmitJob', - request_serializer=jobs.SubmitJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['submit_job'] - - @property - def submit_job_as_operation(self) -> Callable[ - [jobs.SubmitJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the submit job as operation method over gRPC. - - Submits job to a cluster. - - Returns: - Callable[[~.SubmitJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'submit_job_as_operation' not in self._stubs: - self._stubs['submit_job_as_operation'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/SubmitJobAsOperation', - request_serializer=jobs.SubmitJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['submit_job_as_operation'] - - @property - def get_job(self) -> Callable[ - [jobs.GetJobRequest], - jobs.Job]: - r"""Return a callable for the get job method over gRPC. - - Gets the resource representation for a job in a - project. - - Returns: - Callable[[~.GetJobRequest], - ~.Job]: - A function that, when called, will call the underlying RPC - on the server. 
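A sketch of the calls these stubs back, using the flattened helpers on the synchronous client; the project, region, cluster name, and GCS URI are placeholders, and credentials are assumed to come from the environment:

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.JobControllerClient()
    job = dataproc_v1beta2.Job(
        placement=dataproc_v1beta2.JobPlacement(cluster_name="example-cluster"),
        pyspark_job=dataproc_v1beta2.PySparkJob(
            main_python_file_uri="gs://example-bucket/word_count.py",
        ),
    )
    submitted = client.submit_job(
        project_id="example-project", region="us-central1", job=job,
    )
    # Poll the job by the server-assigned ID.
    fetched = client.get_job(
        project_id="example-project",
        region="us-central1",
        job_id=submitted.reference.job_id,
    )
    print(fetched.status.state)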
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_job' not in self._stubs: - self._stubs['get_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/GetJob', - request_serializer=jobs.GetJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['get_job'] - - @property - def list_jobs(self) -> Callable[ - [jobs.ListJobsRequest], - jobs.ListJobsResponse]: - r"""Return a callable for the list jobs method over gRPC. - - Lists regions/{region}/jobs in a project. - - Returns: - Callable[[~.ListJobsRequest], - ~.ListJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_jobs' not in self._stubs: - self._stubs['list_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/ListJobs', - request_serializer=jobs.ListJobsRequest.serialize, - response_deserializer=jobs.ListJobsResponse.deserialize, - ) - return self._stubs['list_jobs'] - - @property - def update_job(self) -> Callable[ - [jobs.UpdateJobRequest], - jobs.Job]: - r"""Return a callable for the update job method over gRPC. - - Updates a job in a project. - - Returns: - Callable[[~.UpdateJobRequest], - ~.Job]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_job' not in self._stubs: - self._stubs['update_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/UpdateJob', - request_serializer=jobs.UpdateJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['update_job'] - - @property - def cancel_job(self) -> Callable[ - [jobs.CancelJobRequest], - jobs.Job]: - r"""Return a callable for the cancel job method over gRPC. - - Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Returns: - Callable[[~.CancelJobRequest], - ~.Job]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_job' not in self._stubs: - self._stubs['cancel_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/CancelJob', - request_serializer=jobs.CancelJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['cancel_job'] - - @property - def delete_job(self) -> Callable[ - [jobs.DeleteJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete job method over gRPC. - - Deletes the job from the project. If the job is active, the - delete fails, and the response returns ``FAILED_PRECONDITION``. - - Returns: - Callable[[~.DeleteJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_job' not in self._stubs: - self._stubs['delete_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/DeleteJob', - request_serializer=jobs.DeleteJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_job'] - - -__all__ = ( - 'JobControllerGrpcTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py deleted file mode 100644 index b846035d..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py +++ /dev/null @@ -1,438 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.dataproc_v1beta2.types import jobs -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import JobControllerTransport, DEFAULT_CLIENT_INFO -from .grpc import JobControllerGrpcTransport - - -class JobControllerGrpcAsyncIOTransport(JobControllerTransport): - """gRPC AsyncIO backend transport for JobController. - - The JobController provides methods to manage jobs. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def submit_job(self) -> Callable[ - [jobs.SubmitJobRequest], - Awaitable[jobs.Job]]: - r"""Return a callable for the submit job method over gRPC. 
- - Submits a job to a cluster. - - Returns: - Callable[[~.SubmitJobRequest], - Awaitable[~.Job]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'submit_job' not in self._stubs: - self._stubs['submit_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/SubmitJob', - request_serializer=jobs.SubmitJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['submit_job'] - - @property - def submit_job_as_operation(self) -> Callable[ - [jobs.SubmitJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the submit job as operation method over gRPC. - - Submits job to a cluster. - - Returns: - Callable[[~.SubmitJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'submit_job_as_operation' not in self._stubs: - self._stubs['submit_job_as_operation'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/SubmitJobAsOperation', - request_serializer=jobs.SubmitJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['submit_job_as_operation'] - - @property - def get_job(self) -> Callable[ - [jobs.GetJobRequest], - Awaitable[jobs.Job]]: - r"""Return a callable for the get job method over gRPC. - - Gets the resource representation for a job in a - project. - - Returns: - Callable[[~.GetJobRequest], - Awaitable[~.Job]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_job' not in self._stubs: - self._stubs['get_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/GetJob', - request_serializer=jobs.GetJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['get_job'] - - @property - def list_jobs(self) -> Callable[ - [jobs.ListJobsRequest], - Awaitable[jobs.ListJobsResponse]]: - r"""Return a callable for the list jobs method over gRPC. - - Lists regions/{region}/jobs in a project. - - Returns: - Callable[[~.ListJobsRequest], - Awaitable[~.ListJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_jobs' not in self._stubs: - self._stubs['list_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/ListJobs', - request_serializer=jobs.ListJobsRequest.serialize, - response_deserializer=jobs.ListJobsResponse.deserialize, - ) - return self._stubs['list_jobs'] - - @property - def update_job(self) -> Callable[ - [jobs.UpdateJobRequest], - Awaitable[jobs.Job]]: - r"""Return a callable for the update job method over gRPC. - - Updates a job in a project. 
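The asyncio transport mirrors the synchronous one method for method; on the client side the only visible difference is that calls are awaited. A sketch, assuming application default credentials and placeholder IDs:

    import asyncio

    from google.cloud import dataproc_v1beta2

    async def show_job_state():
        client = dataproc_v1beta2.JobControllerAsyncClient()
        job = await client.get_job(
            project_id="example-project",
            region="us-central1",
            job_id="example-job-id",
        )
        print(job.status.state)

    asyncio.run(show_job_state())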
- - Returns: - Callable[[~.UpdateJobRequest], - Awaitable[~.Job]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_job' not in self._stubs: - self._stubs['update_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/UpdateJob', - request_serializer=jobs.UpdateJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['update_job'] - - @property - def cancel_job(self) -> Callable[ - [jobs.CancelJobRequest], - Awaitable[jobs.Job]]: - r"""Return a callable for the cancel job method over gRPC. - - Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Returns: - Callable[[~.CancelJobRequest], - Awaitable[~.Job]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_job' not in self._stubs: - self._stubs['cancel_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/CancelJob', - request_serializer=jobs.CancelJobRequest.serialize, - response_deserializer=jobs.Job.deserialize, - ) - return self._stubs['cancel_job'] - - @property - def delete_job(self) -> Callable[ - [jobs.DeleteJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete job method over gRPC. - - Deletes the job from the project. If the job is active, the - delete fails, and the response returns ``FAILED_PRECONDITION``. - - Returns: - Callable[[~.DeleteJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_job' not in self._stubs: - self._stubs['delete_job'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.JobController/DeleteJob', - request_serializer=jobs.DeleteJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_job'] - - -__all__ = ( - 'JobControllerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py deleted file mode 100644 index 1dd621e9..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import WorkflowTemplateServiceClient -from .async_client import WorkflowTemplateServiceAsyncClient - -__all__ = ( - 'WorkflowTemplateServiceClient', - 'WorkflowTemplateServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py deleted file mode 100644 index 066c7077..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py +++ /dev/null @@ -1,943 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1beta2.services.workflow_template_service import pagers -from google.cloud.dataproc_v1beta2.types import workflow_templates -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport -from .client import WorkflowTemplateServiceClient - - -class WorkflowTemplateServiceAsyncClient: - """The API interface for managing Workflow Templates in the - Dataproc API. 
- """ - - _client: WorkflowTemplateServiceClient - - DEFAULT_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_MTLS_ENDPOINT - - cluster_path = staticmethod(WorkflowTemplateServiceClient.cluster_path) - parse_cluster_path = staticmethod(WorkflowTemplateServiceClient.parse_cluster_path) - workflow_template_path = staticmethod(WorkflowTemplateServiceClient.workflow_template_path) - parse_workflow_template_path = staticmethod(WorkflowTemplateServiceClient.parse_workflow_template_path) - common_billing_account_path = staticmethod(WorkflowTemplateServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(WorkflowTemplateServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(WorkflowTemplateServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(WorkflowTemplateServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(WorkflowTemplateServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(WorkflowTemplateServiceClient.parse_common_organization_path) - common_project_path = staticmethod(WorkflowTemplateServiceClient.common_project_path) - parse_common_project_path = staticmethod(WorkflowTemplateServiceClient.parse_common_project_path) - common_location_path = staticmethod(WorkflowTemplateServiceClient.common_location_path) - parse_common_location_path = staticmethod(WorkflowTemplateServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - WorkflowTemplateServiceAsyncClient: The constructed client. - """ - return WorkflowTemplateServiceClient.from_service_account_info.__func__(WorkflowTemplateServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - WorkflowTemplateServiceAsyncClient: The constructed client. - """ - return WorkflowTemplateServiceClient.from_service_account_file.__func__(WorkflowTemplateServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> WorkflowTemplateServiceTransport: - """Returns the transport used by the client instance. - - Returns: - WorkflowTemplateServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(WorkflowTemplateServiceClient).get_transport_class, type(WorkflowTemplateServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, WorkflowTemplateServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the workflow template service client. 
- - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.WorkflowTemplateServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = WorkflowTemplateServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_workflow_template(self, - request: workflow_templates.CreateWorkflowTemplateRequest = None, - *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Creates new workflow template. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.CreateWorkflowTemplateRequest`): - The request object. A request to create a workflow - template. - parent (:class:`str`): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,create``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.create``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - template (:class:`google.cloud.dataproc_v1beta2.types.WorkflowTemplate`): - Required. The Dataproc workflow - template to create. - - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.WorkflowTemplate: - A Dataproc workflow template - resource. 
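Putting those pieces together, a sketch of creating a template in a region; the project, region, bucket, and labels are placeholders, and credentials are assumed to come from the environment:

    import asyncio

    from google.cloud import dataproc_v1beta2

    async def create_template():
        client = dataproc_v1beta2.WorkflowTemplateServiceAsyncClient()
        template = dataproc_v1beta2.WorkflowTemplate(
            id="example-template",
            placement=dataproc_v1beta2.WorkflowTemplatePlacement(
                cluster_selector=dataproc_v1beta2.ClusterSelector(
                    cluster_labels={"env": "staging"},
                ),
            ),
            jobs=[
                dataproc_v1beta2.OrderedJob(
                    step_id="count-words",
                    pyspark_job=dataproc_v1beta2.PySparkJob(
                        main_python_file_uri="gs://example-bucket/word_count.py",
                    ),
                ),
            ],
        )
        return await client.create_workflow_template(
            parent="projects/example-project/regions/us-central1",
            template=template,
        )

    asyncio.run(create_template())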
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, template]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.CreateWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_workflow_template(self, - request: workflow_templates.GetWorkflowTemplateRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Retrieves the latest workflow template. - Can retrieve previously instantiated template by - specifying optional version parameter. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.GetWorkflowTemplateRequest`): - The request object. A request to fetch a workflow - template. - name (:class:`str`): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.get``, the - resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.get``, the - resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.WorkflowTemplate: - A Dataproc workflow template - resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.GetWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def instantiate_workflow_template(self, - request: workflow_templates.InstantiateWorkflowTemplateRequest = None, - *, - name: str = None, - parameters: Sequence[workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Instantiates a template and begins execution. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest`): - The request object. A request to instantiate a workflow - template. - name (:class:`str`): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For - ``projects.regions.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For - ``projects.locations.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (:class:`Sequence[google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest.ParametersEntry]`): - Optional. Map from parameter names to - values that should be used for those - parameters. 
Values may not exceed 100 - characters. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, parameters]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.InstantiateWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - if parameters: - request.parameters.update(parameters) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.instantiate_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates.WorkflowMetadata, - ) - - # Done; return the response. - return response - - async def instantiate_inline_workflow_template(self, - request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, - *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], - [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], - [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. 
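A sketch of driving the long-running operation returned above: instantiating a stored template with parameter values and blocking until the workflow finishes (the resource name and parameters are placeholders):

    import asyncio

    from google.cloud import dataproc_v1beta2

    async def run_template():
        client = dataproc_v1beta2.WorkflowTemplateServiceAsyncClient()
        operation = await client.instantiate_workflow_template(
            name=(
                "projects/example-project/regions/us-central1"
                "/workflowTemplates/example-template"
            ),
            parameters={"INPUT_URI": "gs://example-bucket/input.csv"},
        )
        # The operation resolves to Empty; workflow failures surface as
        # exceptions raised here.
        await operation.result()

    asyncio.run(run_template())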
- - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.InstantiateInlineWorkflowTemplateRequest`): - The request object. A request to instantiate an inline - workflow template. - parent (:class:`str`): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For - ``projects.regions.workflowTemplates,instantiateinline``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For - ``projects.locations.workflowTemplates.instantiateinline``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - template (:class:`google.cloud.dataproc_v1beta2.types.WorkflowTemplate`): - Required. The workflow template to - instantiate. - - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, template]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.InstantiateInlineWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.instantiate_inline_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates.WorkflowMetadata, - ) - - # Done; return the response. - return response - - async def update_workflow_template(self, - request: workflow_templates.UpdateWorkflowTemplateRequest = None, - *, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Updates (replaces) workflow template. The updated - template must contain version that matches the current - server version. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.UpdateWorkflowTemplateRequest`): - The request object. A request to update a workflow - template. - template (:class:`google.cloud.dataproc_v1beta2.types.WorkflowTemplate`): - Required. The updated workflow template. - - The ``template.version`` field must match the current - version. - - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.WorkflowTemplate: - A Dataproc workflow template - resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([template]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.UpdateWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("template.name", request.template.name), - )), - ) - - # Send the request. 
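Because ``update_workflow_template`` requires the submitted template to carry the current server version, the usual pattern is a read-modify-write cycle; a sketch (the resource name is supplied by the caller):

    from google.cloud import dataproc_v1beta2

    async def add_owner_label(
        client: dataproc_v1beta2.WorkflowTemplateServiceAsyncClient,
        name: str,
    ) -> dataproc_v1beta2.WorkflowTemplate:
        template = await client.get_workflow_template(name=name)
        template.labels["owner"] = "data-eng"
        # The fetched template carries the current version, so the update is
        # accepted unless the template changed on the server in between.
        return await client.update_workflow_template(template=template)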
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_workflow_templates(self, - request: workflow_templates.ListWorkflowTemplatesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListWorkflowTemplatesAsyncPager: - r"""Lists workflows that match the specified filter in - the request. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest`): - The request object. A request to list workflow templates - in a project. - parent (:class:`str`): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,list``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.list``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.services.workflow_template_service.pagers.ListWorkflowTemplatesAsyncPager: - A response to a request to list - workflow templates in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.ListWorkflowTemplatesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_workflow_templates, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. 
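On the caller's side the pager hides the page tokens entirely; a sketch of iterating every template in a region (the parent is a placeholder):

    import asyncio

    from google.cloud import dataproc_v1beta2

    async def list_templates():
        client = dataproc_v1beta2.WorkflowTemplateServiceAsyncClient()
        pager = await client.list_workflow_templates(
            parent="projects/example-project/regions/us-central1"
        )
        async for template in pager:
            print(template.name, template.version)

    asyncio.run(list_templates())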
- response = pagers.ListWorkflowTemplatesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_workflow_template(self, - request: workflow_templates.DeleteWorkflowTemplateRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a workflow template. It does not cancel in- - rogress workflows. - - Args: - request (:class:`google.cloud.dataproc_v1beta2.types.DeleteWorkflowTemplateRequest`): - The request object. A request to delete a workflow - template. - Currently started workflows will remain running. - name (:class:`str`): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.delete``, - the resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For - ``projects.locations.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = workflow_templates.DeleteWorkflowTemplateRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "WorkflowTemplateServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py deleted file mode 100644 index fd524a3d..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py +++ /dev/null @@ -1,1092 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.dataproc_v1beta2.services.workflow_template_service import pagers -from google.cloud.dataproc_v1beta2.types import workflow_templates -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import WorkflowTemplateServiceGrpcTransport -from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport - - -class WorkflowTemplateServiceClientMeta(type): - """Metaclass for the WorkflowTemplateService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[WorkflowTemplateServiceTransport]] - _transport_registry["grpc"] = WorkflowTemplateServiceGrpcTransport - _transport_registry["grpc_asyncio"] = WorkflowTemplateServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[WorkflowTemplateServiceTransport]: - """Returns an appropriate transport class. 
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class WorkflowTemplateServiceClient(metaclass=WorkflowTemplateServiceClientMeta):
-    """The API interface for managing Workflow Templates in the
-    Dataproc API.
-    """
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
-        )
-
-        m = mtls_endpoint_re.match(api_endpoint)
-        name, mtls, sandbox, googledomain = m.groups()
-        if mtls or not googledomain:
-            return api_endpoint
-
-        if sandbox:
-            return api_endpoint.replace(
-                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
-            )
-
-        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
-
-    DEFAULT_ENDPOINT = "dataproc.googleapis.com"
-    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
-        DEFAULT_ENDPOINT
-    )
-
-    @classmethod
-    def from_service_account_info(cls, info: dict, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            info.
-
-        Args:
-            info (dict): The service account private key info.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            WorkflowTemplateServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_info(info)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    @classmethod
-    def from_service_account_file(cls, filename: str, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            file.
-
-        Args:
-            filename (str): The path to the service account private key json
-                file.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            WorkflowTemplateServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_file(
-            filename)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    from_service_account_json = from_service_account_file
-
-    @property
-    def transport(self) -> WorkflowTemplateServiceTransport:
-        """Returns the transport used by the client instance.
-
-        Returns:
-            WorkflowTemplateServiceTransport: The transport used by the client
-                instance.
-        """
-        return self._transport
-
-    @staticmethod
-    def cluster_path(project: str,location: str,cluster: str,) -> str:
-        """Returns a fully-qualified cluster string."""
-        return "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, )
-
-    @staticmethod
-    def parse_cluster_path(path: str) -> Dict[str,str]:
-        """Parses a cluster path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/clusters/(?P<cluster>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def workflow_template_path(project: str,region: str,workflow_template: str,) -> str:
-        """Returns a fully-qualified workflow_template string."""
-        return "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format(project=project, region=region, workflow_template=workflow_template, )
-
-    @staticmethod
-    def parse_workflow_template_path(path: str) -> Dict[str,str]:
-        """Parses a workflow_template path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/regions/(?P<region>.+?)/workflowTemplates/(?P<workflow_template>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_billing_account_path(billing_account: str, ) -> str:
-        """Returns a fully-qualified billing_account string."""
-        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-
-    @staticmethod
-    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
-        """Parse a billing_account path into its component segments."""
-        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_folder_path(folder: str, ) -> str:
-        """Returns a fully-qualified folder string."""
-        return "folders/{folder}".format(folder=folder, )
-
-    @staticmethod
-    def parse_common_folder_path(path: str) -> Dict[str,str]:
-        """Parse a folder path into its component segments."""
-        m = re.match(r"^folders/(?P<folder>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_organization_path(organization: str, ) -> str:
-        """Returns a fully-qualified organization string."""
-        return "organizations/{organization}".format(organization=organization, )
-
-    @staticmethod
-    def parse_common_organization_path(path: str) -> Dict[str,str]:
-        """Parse a organization path into its component segments."""
-        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_project_path(project: str, ) -> str:
-        """Returns a fully-qualified project string."""
-        return "projects/{project}".format(project=project, )
-
-    @staticmethod
-    def parse_common_project_path(path: str) -> Dict[str,str]:
-        """Parse a project path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_location_path(project: str, location: str, ) -> str:
-        """Returns a fully-qualified location string."""
-        return "projects/{project}/locations/{location}".format(project=project, location=location, )
-
-    @staticmethod
-    def parse_common_location_path(path: str) -> Dict[str,str]:
-        """Parse a location path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    def __init__(self, *,
-            credentials: Optional[ga_credentials.Credentials] = None,
-            transport: Union[str, WorkflowTemplateServiceTransport, None] = None,
-            client_options: Optional[client_options_lib.ClientOptions] = None,
-            client_info:
gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the workflow template service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, WorkflowTemplateServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. 
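(Editorial illustration, not part of the generated patch.) The endpoint resolution above means an explicit ``api_endpoint`` always takes precedence over the ``GOOGLE_API_USE_MTLS_ENDPOINT`` setting. A minimal sketch, assuming application default credentials; the regional endpoint value is only an example:

    from google.api_core.client_options import ClientOptions
    from google.cloud import dataproc_v1beta2

    # An explicit api_endpoint overrides the mTLS auto-switching logic above.
    options = ClientOptions(api_endpoint="us-central1-dataproc.googleapis.com:443")
    client = dataproc_v1beta2.WorkflowTemplateServiceClient(client_options=options)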
- if isinstance(transport, WorkflowTemplateServiceTransport): - # transport is a WorkflowTemplateServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_workflow_template(self, - request: workflow_templates.CreateWorkflowTemplateRequest = None, - *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Creates new workflow template. - - Args: - request (google.cloud.dataproc_v1beta2.types.CreateWorkflowTemplateRequest): - The request object. A request to create a workflow - template. - parent (str): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,create``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.create``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): - Required. The Dataproc workflow - template to create. - - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.WorkflowTemplate: - A Dataproc workflow template - resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, template]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.CreateWorkflowTemplateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.CreateWorkflowTemplateRequest): - request = workflow_templates.CreateWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_workflow_template(self, - request: workflow_templates.GetWorkflowTemplateRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Retrieves the latest workflow template. - Can retrieve previously instantiated template by - specifying optional version parameter. - - Args: - request (google.cloud.dataproc_v1beta2.types.GetWorkflowTemplateRequest): - The request object. A request to fetch a workflow - template. - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.get``, the - resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.get``, the - resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.WorkflowTemplate: - A Dataproc workflow template - resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.GetWorkflowTemplateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.GetWorkflowTemplateRequest): - request = workflow_templates.GetWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def instantiate_workflow_template(self, - request: workflow_templates.InstantiateWorkflowTemplateRequest = None, - *, - name: str = None, - parameters: Sequence[workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Instantiates a template and begins execution. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Args: - request (google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest): - The request object. A request to instantiate a workflow - template. - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For - ``projects.regions.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For - ``projects.locations.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (Sequence[google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): - Optional. Map from parameter names to - values that should be used for those - parameters. Values may not exceed 100 - characters. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, parameters]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.InstantiateWorkflowTemplateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.InstantiateWorkflowTemplateRequest): - request = workflow_templates.InstantiateWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if parameters is not None: - request.parameters = parameters - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.instantiate_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates.WorkflowMetadata, - ) - - # Done; return the response. - return response - - def instantiate_inline_workflow_template(self, - request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, - *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], - [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], - [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Args: - request (google.cloud.dataproc_v1beta2.types.InstantiateInlineWorkflowTemplateRequest): - The request object. A request to instantiate an inline - workflow template. - parent (str): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. 
- - - For - ``projects.regions.workflowTemplates,instantiateinline``, - the resource name of the region has the following - format: ``projects/{project_id}/regions/{region}`` - - - For - ``projects.locations.workflowTemplates.instantiateinline``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): - Required. The workflow template to - instantiate. - - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, template]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.InstantiateInlineWorkflowTemplateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.InstantiateInlineWorkflowTemplateRequest): - request = workflow_templates.InstantiateInlineWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.instantiate_inline_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates.WorkflowMetadata, - ) - - # Done; return the response. 
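(Editorial illustration, not part of the generated patch.) The ``operation.from_gapic`` wrapper above yields a future-like object; a caller can block on it or inspect its ``WorkflowMetadata``. A sketch only, with hypothetical resource names:

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.WorkflowTemplateServiceClient()
    operation = client.instantiate_workflow_template(
        name="projects/my-project/regions/us-central1/workflowTemplates/my-template",
    )
    # result() returns Empty on success; progress is exposed via operation.metadata.
    operation.result(timeout=1800)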
- return response - - def update_workflow_template(self, - request: workflow_templates.UpdateWorkflowTemplateRequest = None, - *, - template: workflow_templates.WorkflowTemplate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> workflow_templates.WorkflowTemplate: - r"""Updates (replaces) workflow template. The updated - template must contain version that matches the current - server version. - - Args: - request (google.cloud.dataproc_v1beta2.types.UpdateWorkflowTemplateRequest): - The request object. A request to update a workflow - template. - template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): - Required. The updated workflow template. - - The ``template.version`` field must match the current - version. - - This corresponds to the ``template`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.types.WorkflowTemplate: - A Dataproc workflow template - resource. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([template]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.UpdateWorkflowTemplateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.UpdateWorkflowTemplateRequest): - request = workflow_templates.UpdateWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if template is not None: - request.template = template - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("template.name", request.template.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_workflow_templates(self, - request: workflow_templates.ListWorkflowTemplatesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListWorkflowTemplatesPager: - r"""Lists workflows that match the specified filter in - the request. - - Args: - request (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest): - The request object. A request to list workflow templates - in a project. - parent (str): - Required. The resource name of the region or location, - as described in - https://cloud.google.com/apis/design/resource_names. 
- - - For ``projects.regions.workflowTemplates,list``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.list``, - the resource name of the location has the following - format: - ``projects/{project_id}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.dataproc_v1beta2.services.workflow_template_service.pagers.ListWorkflowTemplatesPager: - A response to a request to list - workflow templates in a project. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.ListWorkflowTemplatesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.ListWorkflowTemplatesRequest): - request = workflow_templates.ListWorkflowTemplatesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_workflow_templates] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListWorkflowTemplatesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_workflow_template(self, - request: workflow_templates.DeleteWorkflowTemplateRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a workflow template. It does not cancel in- - rogress workflows. - - Args: - request (google.cloud.dataproc_v1beta2.types.DeleteWorkflowTemplateRequest): - The request object. A request to delete a workflow - template. - Currently started workflows will remain running. - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. 
- - - For ``projects.regions.workflowTemplates.delete``, - the resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For - ``projects.locations.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a workflow_templates.DeleteWorkflowTemplateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, workflow_templates.DeleteWorkflowTemplateRequest): - request = workflow_templates.DeleteWorkflowTemplateRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_workflow_template] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "WorkflowTemplateServiceClient", -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py deleted file mode 100644 index 8922f151..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.dataproc_v1beta2.types import workflow_templates - - -class ListWorkflowTemplatesPager: - """A pager for iterating through ``list_workflow_templates`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``templates`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListWorkflowTemplates`` requests and continue to iterate - through the ``templates`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., workflow_templates.ListWorkflowTemplatesResponse], - request: workflow_templates.ListWorkflowTemplatesRequest, - response: workflow_templates.ListWorkflowTemplatesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest): - The initial request object. - response (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = workflow_templates.ListWorkflowTemplatesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[workflow_templates.ListWorkflowTemplatesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[workflow_templates.WorkflowTemplate]: - for page in self.pages: - yield from page.templates - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListWorkflowTemplatesAsyncPager: - """A pager for iterating through ``list_workflow_templates`` requests. - - This class thinly wraps an initial - :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``templates`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListWorkflowTemplates`` requests and continue to iterate - through the ``templates`` field on the - corresponding responses. - - All the usual :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[workflow_templates.ListWorkflowTemplatesResponse]], - request: workflow_templates.ListWorkflowTemplatesRequest, - response: workflow_templates.ListWorkflowTemplatesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest): - The initial request object. - response (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = workflow_templates.ListWorkflowTemplatesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[workflow_templates.ListWorkflowTemplatesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[workflow_templates.WorkflowTemplate]: - async def async_generator(): - async for page in self.pages: - for response in page.templates: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py deleted file mode 100644 index 96efd4cb..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import WorkflowTemplateServiceTransport -from .grpc import WorkflowTemplateServiceGrpcTransport -from .grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
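(Editorial illustration, not part of the generated patch.) The registry compiled below is what lets callers select a transport by label instead of passing an instance. A minimal sketch, assuming application default credentials:

    from google.cloud import dataproc_v1beta2

    # "grpc" selects the synchronous transport; the async client uses "grpc_asyncio".
    client = dataproc_v1beta2.WorkflowTemplateServiceClient(transport="grpc")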
-_transport_registry = OrderedDict() # type: Dict[str, Type[WorkflowTemplateServiceTransport]] -_transport_registry['grpc'] = WorkflowTemplateServiceGrpcTransport -_transport_registry['grpc_asyncio'] = WorkflowTemplateServiceGrpcAsyncIOTransport - -__all__ = ( - 'WorkflowTemplateServiceTransport', - 'WorkflowTemplateServiceGrpcTransport', - 'WorkflowTemplateServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py deleted file mode 100644 index b8a83efd..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py +++ /dev/null @@ -1,306 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.dataproc_v1beta2.types import workflow_templates -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-dataproc', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class WorkflowTemplateServiceTransport(abc.ABC): - """Abstract transport class for WorkflowTemplateService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'dataproc.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
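(Editorial illustration, not part of the generated patch.) The wrapped methods computed below carry the per-method default retry and timeout values; a caller can override them on an individual call. A sketch with assumed retry settings and a hypothetical template name:

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries
    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.WorkflowTemplateServiceClient()
    template = client.get_workflow_template(
        name="projects/my-project/regions/us-central1/workflowTemplates/my-template",
        # Per-call retry and timeout take precedence over the wrapped-method defaults.
        retry=retries.Retry(
            initial=0.2,
            maximum=30.0,
            multiplier=2.0,
            deadline=120.0,
            predicate=retries.if_exception_type(core_exceptions.ServiceUnavailable),
        ),
        timeout=120.0,
    )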
- self._wrapped_methods = { - self.create_workflow_template: gapic_v1.method.wrap_method( - self.create_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.get_workflow_template: gapic_v1.method.wrap_method( - self.get_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.instantiate_workflow_template: gapic_v1.method.wrap_method( - self.instantiate_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.instantiate_inline_workflow_template: gapic_v1.method.wrap_method( - self.instantiate_inline_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.update_workflow_template: gapic_v1.method.wrap_method( - self.update_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.list_workflow_templates: gapic_v1.method.wrap_method( - self.list_workflow_templates, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.InternalServerError, - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - self.delete_workflow_template: gapic_v1.method.wrap_method( - self.delete_workflow_template, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.ServiceUnavailable, - ), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_workflow_template(self) -> Callable[ - [workflow_templates.CreateWorkflowTemplateRequest], - Union[ - workflow_templates.WorkflowTemplate, - Awaitable[workflow_templates.WorkflowTemplate] - ]]: - raise NotImplementedError() - - @property - def get_workflow_template(self) -> Callable[ - [workflow_templates.GetWorkflowTemplateRequest], - Union[ - workflow_templates.WorkflowTemplate, - Awaitable[workflow_templates.WorkflowTemplate] - ]]: - raise NotImplementedError() - - @property - def instantiate_workflow_template(self) -> Callable[ - [workflow_templates.InstantiateWorkflowTemplateRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def instantiate_inline_workflow_template(self) -> Callable[ - 
[workflow_templates.InstantiateInlineWorkflowTemplateRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def update_workflow_template(self) -> Callable[ - [workflow_templates.UpdateWorkflowTemplateRequest], - Union[ - workflow_templates.WorkflowTemplate, - Awaitable[workflow_templates.WorkflowTemplate] - ]]: - raise NotImplementedError() - - @property - def list_workflow_templates(self) -> Callable[ - [workflow_templates.ListWorkflowTemplatesRequest], - Union[ - workflow_templates.ListWorkflowTemplatesResponse, - Awaitable[workflow_templates.ListWorkflowTemplatesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_workflow_template(self) -> Callable[ - [workflow_templates.DeleteWorkflowTemplateRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'WorkflowTemplateServiceTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py deleted file mode 100644 index 9dc86cd7..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py +++ /dev/null @@ -1,481 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.dataproc_v1beta2.types import workflow_templates -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO - - -class WorkflowTemplateServiceGrpcTransport(WorkflowTemplateServiceTransport): - """gRPC backend transport for WorkflowTemplateService. - - The API interface for managing Workflow Templates in the - Dataproc API. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
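For context, the retry and timeout defaults wired up in `_prep_wrapped_messages` can also be reproduced per call from application code. A minimal sketch, assuming the public `google.cloud.dataproc_v1beta2` client surface and placeholder project, region, and template names:

```python
# Hypothetical sketch: overriding the generated retry/timeout defaults per call.
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.WorkflowTemplateServiceClient()

# Mirrors the wrapped defaults for get_workflow_template: exponential backoff
# from 0.1s to 60s (multiplier 1.3), retrying DEADLINE_EXCEEDED, INTERNAL and
# UNAVAILABLE, with an overall 600s deadline.
per_call_retry = retries.Retry(
    initial=0.1,
    maximum=60.0,
    multiplier=1.3,
    predicate=retries.if_exception_type(
        core_exceptions.DeadlineExceeded,
        core_exceptions.InternalServerError,
        core_exceptions.ServiceUnavailable,
    ),
    deadline=600.0,
)

template = client.get_workflow_template(
    name="projects/my-project/regions/us-central1/workflowTemplates/my-template",
    retry=per_call_retry,
    timeout=600.0,
)
print(template.name)
```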
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. 
- credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. 
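Passing a ready-made `channel` causes this transport to skip credential resolution and reuse that channel directly. A minimal sketch of wiring this up by hand, assuming the module paths shown in this diff and Application Default Credentials being available:

```python
# Hypothetical sketch: supplying an explicit gRPC channel to the transport.
from google.cloud.dataproc_v1beta2.services.workflow_template_service import (
    WorkflowTemplateServiceClient,
)
from google.cloud.dataproc_v1beta2.services.workflow_template_service.transports import (
    WorkflowTemplateServiceGrpcTransport,
)

# create_channel() applies the default host, AUTH_SCOPES, and ADC credentials,
# as defined by the classmethod in this file.
channel = WorkflowTemplateServiceGrpcTransport.create_channel()
transport = WorkflowTemplateServiceGrpcTransport(channel=channel)
client = WorkflowTemplateServiceClient(transport=transport)
```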
- - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_workflow_template(self) -> Callable[ - [workflow_templates.CreateWorkflowTemplateRequest], - workflow_templates.WorkflowTemplate]: - r"""Return a callable for the create workflow template method over gRPC. - - Creates new workflow template. - - Returns: - Callable[[~.CreateWorkflowTemplateRequest], - ~.WorkflowTemplate]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_workflow_template' not in self._stubs: - self._stubs['create_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate', - request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['create_workflow_template'] - - @property - def get_workflow_template(self) -> Callable[ - [workflow_templates.GetWorkflowTemplateRequest], - workflow_templates.WorkflowTemplate]: - r"""Return a callable for the get workflow template method over gRPC. - - Retrieves the latest workflow template. - Can retrieve previously instantiated template by - specifying optional version parameter. - - Returns: - Callable[[~.GetWorkflowTemplateRequest], - ~.WorkflowTemplate]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_workflow_template' not in self._stubs: - self._stubs['get_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate', - request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['get_workflow_template'] - - @property - def instantiate_workflow_template(self) -> Callable[ - [workflow_templates.InstantiateWorkflowTemplateRequest], - operations_pb2.Operation]: - r"""Return a callable for the instantiate workflow template method over gRPC. - - Instantiates a template and begins execution. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. 
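Because `instantiate_workflow_template` returns a long-running operation whose response is `Empty` and whose metadata is `WorkflowMetadata`, callers typically block on the operation and then read the metadata. A hypothetical sketch with placeholder resource names:

```python
# Hypothetical sketch: waiting on the instantiate long-running operation.
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.WorkflowTemplateServiceClient()

operation = client.instantiate_workflow_template(
    name="projects/my-project/regions/us-central1/workflowTemplates/my-template",
)

# The LRO response type is google.protobuf.Empty, so the useful signal is the
# WorkflowMetadata exposed through operation.metadata.
operation.result(timeout=1800)
print(operation.metadata.state, operation.metadata.cluster_name)
```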
- - Returns: - Callable[[~.InstantiateWorkflowTemplateRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'instantiate_workflow_template' not in self._stubs: - self._stubs['instantiate_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate', - request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['instantiate_workflow_template'] - - @property - def instantiate_inline_workflow_template(self) -> Callable[ - [workflow_templates.InstantiateInlineWorkflowTemplateRequest], - operations_pb2.Operation]: - r"""Return a callable for the instantiate inline workflow - template method over gRPC. - - Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], - [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], - [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Returns: - Callable[[~.InstantiateInlineWorkflowTemplateRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'instantiate_inline_workflow_template' not in self._stubs: - self._stubs['instantiate_inline_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateInlineWorkflowTemplate', - request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['instantiate_inline_workflow_template'] - - @property - def update_workflow_template(self) -> Callable[ - [workflow_templates.UpdateWorkflowTemplateRequest], - workflow_templates.WorkflowTemplate]: - r"""Return a callable for the update workflow template method over gRPC. - - Updates (replaces) workflow template. The updated - template must contain version that matches the current - server version. - - Returns: - Callable[[~.UpdateWorkflowTemplateRequest], - ~.WorkflowTemplate]: - A function that, when called, will call the underlying RPC - on the server. 
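The version-matching rule for updates usually translates into a read-modify-write pattern: fetch the template, which carries the current version, then send the modified message back. A hypothetical sketch with placeholder names:

```python
# Hypothetical sketch: read-modify-write so the version check passes.
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.WorkflowTemplateServiceClient()
name = "projects/my-project/regions/us-central1/workflowTemplates/my-template"

template = client.get_workflow_template(name=name)  # carries the current version
template.labels["owner"] = "data-eng"

updated = client.update_workflow_template(template=template)
print(updated.version)
```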
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_workflow_template' not in self._stubs: - self._stubs['update_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate', - request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['update_workflow_template'] - - @property - def list_workflow_templates(self) -> Callable[ - [workflow_templates.ListWorkflowTemplatesRequest], - workflow_templates.ListWorkflowTemplatesResponse]: - r"""Return a callable for the list workflow templates method over gRPC. - - Lists workflows that match the specified filter in - the request. - - Returns: - Callable[[~.ListWorkflowTemplatesRequest], - ~.ListWorkflowTemplatesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_workflow_templates' not in self._stubs: - self._stubs['list_workflow_templates'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates', - request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize, - response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize, - ) - return self._stubs['list_workflow_templates'] - - @property - def delete_workflow_template(self) -> Callable[ - [workflow_templates.DeleteWorkflowTemplateRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete workflow template method over gRPC. - - Deletes a workflow template. It does not cancel in- - rogress workflows. - - Returns: - Callable[[~.DeleteWorkflowTemplateRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_workflow_template' not in self._stubs: - self._stubs['delete_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate', - request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_workflow_template'] - - -__all__ = ( - 'WorkflowTemplateServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py deleted file mode 100644 index 3894c9b6..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,485 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.dataproc_v1beta2.types import workflow_templates -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import WorkflowTemplateServiceGrpcTransport - - -class WorkflowTemplateServiceGrpcAsyncIOTransport(WorkflowTemplateServiceTransport): - """gRPC AsyncIO backend transport for WorkflowTemplateService. - - The API interface for managing Workflow Templates in the - Dataproc API. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'dataproc.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_workflow_template(self) -> Callable[ - [workflow_templates.CreateWorkflowTemplateRequest], - Awaitable[workflow_templates.WorkflowTemplate]]: - r"""Return a callable for the create workflow template method over gRPC. - - Creates new workflow template. - - Returns: - Callable[[~.CreateWorkflowTemplateRequest], - Awaitable[~.WorkflowTemplate]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_workflow_template' not in self._stubs: - self._stubs['create_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate', - request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['create_workflow_template'] - - @property - def get_workflow_template(self) -> Callable[ - [workflow_templates.GetWorkflowTemplateRequest], - Awaitable[workflow_templates.WorkflowTemplate]]: - r"""Return a callable for the get workflow template method over gRPC. - - Retrieves the latest workflow template. - Can retrieve previously instantiated template by - specifying optional version parameter. - - Returns: - Callable[[~.GetWorkflowTemplateRequest], - Awaitable[~.WorkflowTemplate]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_workflow_template' not in self._stubs: - self._stubs['get_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate', - request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['get_workflow_template'] - - @property - def instantiate_workflow_template(self) -> Callable[ - [workflow_templates.InstantiateWorkflowTemplateRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the instantiate workflow template method over gRPC. - - Instantiates a template and begins execution. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Returns: - Callable[[~.InstantiateWorkflowTemplateRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'instantiate_workflow_template' not in self._stubs: - self._stubs['instantiate_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate', - request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['instantiate_workflow_template'] - - @property - def instantiate_inline_workflow_template(self) -> Callable[ - [workflow_templates.InstantiateInlineWorkflowTemplateRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the instantiate inline workflow - template method over gRPC. - - Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], - [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], - [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and - workflow-owned clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] - will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will - be [Empty][google.protobuf.Empty]. - - Returns: - Callable[[~.InstantiateInlineWorkflowTemplateRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'instantiate_inline_workflow_template' not in self._stubs: - self._stubs['instantiate_inline_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateInlineWorkflowTemplate', - request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['instantiate_inline_workflow_template'] - - @property - def update_workflow_template(self) -> Callable[ - [workflow_templates.UpdateWorkflowTemplateRequest], - Awaitable[workflow_templates.WorkflowTemplate]]: - r"""Return a callable for the update workflow template method over gRPC. - - Updates (replaces) workflow template. The updated - template must contain version that matches the current - server version. - - Returns: - Callable[[~.UpdateWorkflowTemplateRequest], - Awaitable[~.WorkflowTemplate]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_workflow_template' not in self._stubs: - self._stubs['update_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate', - request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize, - response_deserializer=workflow_templates.WorkflowTemplate.deserialize, - ) - return self._stubs['update_workflow_template'] - - @property - def list_workflow_templates(self) -> Callable[ - [workflow_templates.ListWorkflowTemplatesRequest], - Awaitable[workflow_templates.ListWorkflowTemplatesResponse]]: - r"""Return a callable for the list workflow templates method over gRPC. - - Lists workflows that match the specified filter in - the request. - - Returns: - Callable[[~.ListWorkflowTemplatesRequest], - Awaitable[~.ListWorkflowTemplatesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_workflow_templates' not in self._stubs: - self._stubs['list_workflow_templates'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates', - request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize, - response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize, - ) - return self._stubs['list_workflow_templates'] - - @property - def delete_workflow_template(self) -> Callable[ - [workflow_templates.DeleteWorkflowTemplateRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete workflow template method over gRPC. - - Deletes a workflow template. It does not cancel in-progress workflows. - - Returns: - Callable[[~.DeleteWorkflowTemplateRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_workflow_template' not in self._stubs: - self._stubs['delete_workflow_template'] = self.grpc_channel.unary_unary( - '/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate', - request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_workflow_template'] - - -__all__ = ( - 'WorkflowTemplateServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/__init__.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/__init__.py deleted file mode 100644 index de792217..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/__init__.py +++ /dev/null @@ -1,197 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from .autoscaling_policies import ( - AutoscalingPolicy, - BasicAutoscalingAlgorithm, - BasicYarnAutoscalingConfig, - CreateAutoscalingPolicyRequest, - DeleteAutoscalingPolicyRequest, - GetAutoscalingPolicyRequest, - InstanceGroupAutoscalingPolicyConfig, - ListAutoscalingPoliciesRequest, - ListAutoscalingPoliciesResponse, - UpdateAutoscalingPolicyRequest, -) -from .clusters import ( - AcceleratorConfig, - AutoscalingConfig, - Cluster, - ClusterConfig, - ClusterMetrics, - ClusterStatus, - CreateClusterRequest, - DeleteClusterRequest, - DiagnoseClusterRequest, - DiagnoseClusterResults, - DiskConfig, - EncryptionConfig, - EndpointConfig, - GceClusterConfig, - GetClusterRequest, - GkeClusterConfig, - InstanceGroupConfig, - KerberosConfig, - LifecycleConfig, - ListClustersRequest, - ListClustersResponse, - ManagedGroupConfig, - NodeInitializationAction, - ReservationAffinity, - SecurityConfig, - SoftwareConfig, - UpdateClusterRequest, -) -from .jobs import ( - CancelJobRequest, - DeleteJobRequest, - GetJobRequest, - HadoopJob, - HiveJob, - Job, - JobMetadata, - JobPlacement, - JobReference, - JobScheduling, - JobStatus, - ListJobsRequest, - ListJobsResponse, - LoggingConfig, - PigJob, - PrestoJob, - PySparkJob, - QueryList, - SparkJob, - SparkRJob, - SparkSqlJob, - SubmitJobRequest, - UpdateJobRequest, - YarnApplication, -) -from .operations import ( - ClusterOperationMetadata, - ClusterOperationStatus, -) -from .workflow_templates import ( - ClusterOperation, - ClusterSelector, - CreateWorkflowTemplateRequest, - DeleteWorkflowTemplateRequest, - GetWorkflowTemplateRequest, - InstantiateInlineWorkflowTemplateRequest, - InstantiateWorkflowTemplateRequest, - ListWorkflowTemplatesRequest, - ListWorkflowTemplatesResponse, - ManagedCluster, - OrderedJob, - ParameterValidation, - RegexValidation, - TemplateParameter, - UpdateWorkflowTemplateRequest, - ValueValidation, - WorkflowGraph, - WorkflowMetadata, - WorkflowNode, - WorkflowTemplate, - WorkflowTemplatePlacement, -) - -__all__ = ( - 'AutoscalingPolicy', - 'BasicAutoscalingAlgorithm', - 'BasicYarnAutoscalingConfig', - 'CreateAutoscalingPolicyRequest', - 'DeleteAutoscalingPolicyRequest', - 'GetAutoscalingPolicyRequest', - 'InstanceGroupAutoscalingPolicyConfig', - 'ListAutoscalingPoliciesRequest', - 'ListAutoscalingPoliciesResponse', - 'UpdateAutoscalingPolicyRequest', - 'AcceleratorConfig', - 'AutoscalingConfig', - 'Cluster', - 'ClusterConfig', - 'ClusterMetrics', - 'ClusterStatus', - 'CreateClusterRequest', - 'DeleteClusterRequest', - 'DiagnoseClusterRequest', - 'DiagnoseClusterResults', - 'DiskConfig', - 'EncryptionConfig', - 'EndpointConfig', - 'GceClusterConfig', - 'GetClusterRequest', - 'GkeClusterConfig', - 'InstanceGroupConfig', - 'KerberosConfig', - 'LifecycleConfig', - 'ListClustersRequest', - 'ListClustersResponse', - 'ManagedGroupConfig', - 'NodeInitializationAction', - 'ReservationAffinity', - 'SecurityConfig', - 'SoftwareConfig', - 'UpdateClusterRequest', - 'CancelJobRequest', - 'DeleteJobRequest', - 'GetJobRequest', - 'HadoopJob', - 'HiveJob', - 'Job', - 'JobMetadata', - 'JobPlacement', - 'JobReference', - 'JobScheduling', - 'JobStatus', - 'ListJobsRequest', - 'ListJobsResponse', - 'LoggingConfig', - 'PigJob', - 'PrestoJob', - 'PySparkJob', - 'QueryList', - 'SparkJob', - 'SparkRJob', - 'SparkSqlJob', - 'SubmitJobRequest', - 'UpdateJobRequest', - 'YarnApplication', - 'ClusterOperationMetadata', - 'ClusterOperationStatus', - 
'Component', - 'ClusterOperation', - 'ClusterSelector', - 'CreateWorkflowTemplateRequest', - 'DeleteWorkflowTemplateRequest', - 'GetWorkflowTemplateRequest', - 'InstantiateInlineWorkflowTemplateRequest', - 'InstantiateWorkflowTemplateRequest', - 'ListWorkflowTemplatesRequest', - 'ListWorkflowTemplatesResponse', - 'ManagedCluster', - 'OrderedJob', - 'ParameterValidation', - 'RegexValidation', - 'TemplateParameter', - 'UpdateWorkflowTemplateRequest', - 'ValueValidation', - 'WorkflowGraph', - 'WorkflowMetadata', - 'WorkflowNode', - 'WorkflowTemplate', - 'WorkflowTemplatePlacement', -) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py deleted file mode 100644 index 7e7fd933..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py +++ /dev/null @@ -1,416 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1beta2', - manifest={ - 'AutoscalingPolicy', - 'BasicAutoscalingAlgorithm', - 'BasicYarnAutoscalingConfig', - 'InstanceGroupAutoscalingPolicyConfig', - 'CreateAutoscalingPolicyRequest', - 'GetAutoscalingPolicyRequest', - 'UpdateAutoscalingPolicyRequest', - 'DeleteAutoscalingPolicyRequest', - 'ListAutoscalingPoliciesRequest', - 'ListAutoscalingPoliciesResponse', - }, -) - - -class AutoscalingPolicy(proto.Message): - r"""Describes an autoscaling policy for Dataproc cluster - autoscaler. - - Attributes: - id (str): - Required. The policy id. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). Cannot begin or end with - underscore or hyphen. Must consist of between 3 and 50 - characters. - name (str): - Output only. The "resource name" of the autoscaling policy, - as described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies``, the - resource name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies``, the - resource name of the policy has the following format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - basic_algorithm (google.cloud.dataproc_v1beta2.types.BasicAutoscalingAlgorithm): - - worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupAutoscalingPolicyConfig): - Required. Describes how the autoscaler will - operate for primary workers. - secondary_worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupAutoscalingPolicyConfig): - Optional. Describes how the autoscaler will - operate for secondary workers. 
- """ - - id = proto.Field( - proto.STRING, - number=1, - ) - name = proto.Field( - proto.STRING, - number=2, - ) - basic_algorithm = proto.Field( - proto.MESSAGE, - number=3, - oneof='algorithm', - message='BasicAutoscalingAlgorithm', - ) - worker_config = proto.Field( - proto.MESSAGE, - number=4, - message='InstanceGroupAutoscalingPolicyConfig', - ) - secondary_worker_config = proto.Field( - proto.MESSAGE, - number=5, - message='InstanceGroupAutoscalingPolicyConfig', - ) - - -class BasicAutoscalingAlgorithm(proto.Message): - r"""Basic algorithm for autoscaling. - Attributes: - yarn_config (google.cloud.dataproc_v1beta2.types.BasicYarnAutoscalingConfig): - Required. YARN autoscaling configuration. - cooldown_period (google.protobuf.duration_pb2.Duration): - Optional. Duration between scaling events. A scaling period - starts after the update operation from the previous event - has completed. - - Bounds: [2m, 1d]. Default: 2m. - """ - - yarn_config = proto.Field( - proto.MESSAGE, - number=1, - message='BasicYarnAutoscalingConfig', - ) - cooldown_period = proto.Field( - proto.MESSAGE, - number=2, - message=duration_pb2.Duration, - ) - - -class BasicYarnAutoscalingConfig(proto.Message): - r"""Basic autoscaling configurations for YARN. - Attributes: - graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): - Required. Timeout for YARN graceful decommissioning of Node - Managers. Specifies the duration to wait for jobs to - complete before forcefully removing workers (and potentially - interrupting jobs). Only applicable to downscaling - operations. - - Bounds: [0s, 1d]. - scale_up_factor (float): - Required. Fraction of average YARN pending memory in the - last cooldown period for which to add workers. A scale-up - factor of 1.0 will result in scaling up so that there is no - pending memory remaining after the update (more aggressive - scaling). A scale-up factor closer to 0 will result in a - smaller magnitude of scaling up (less aggressive scaling). - See `How autoscaling - works `__ - for more information. - - Bounds: [0.0, 1.0]. - scale_down_factor (float): - Required. Fraction of average YARN pending memory in the - last cooldown period for which to remove workers. A - scale-down factor of 1 will result in scaling down so that - there is no available memory remaining after the update - (more aggressive scaling). A scale-down factor of 0 disables - removing workers, which can be beneficial for autoscaling a - single job. See `How autoscaling - works `__ - for more information. - - Bounds: [0.0, 1.0]. - scale_up_min_worker_fraction (float): - Optional. Minimum scale-up threshold as a fraction of total - cluster size before scaling occurs. For example, in a - 20-worker cluster, a threshold of 0.1 means the autoscaler - must recommend at least a 2-worker scale-up for the cluster - to scale. A threshold of 0 means the autoscaler will scale - up on any recommended change. - - Bounds: [0.0, 1.0]. Default: 0.0. - scale_down_min_worker_fraction (float): - Optional. Minimum scale-down threshold as a fraction of - total cluster size before scaling occurs. For example, in a - 20-worker cluster, a threshold of 0.1 means the autoscaler - must recommend at least a 2 worker scale-down for the - cluster to scale. A threshold of 0 means the autoscaler will - scale down on any recommended change. - - Bounds: [0.0, 1.0]. Default: 0.0. 
- """ - - graceful_decommission_timeout = proto.Field( - proto.MESSAGE, - number=5, - message=duration_pb2.Duration, - ) - scale_up_factor = proto.Field( - proto.DOUBLE, - number=1, - ) - scale_down_factor = proto.Field( - proto.DOUBLE, - number=2, - ) - scale_up_min_worker_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - scale_down_min_worker_fraction = proto.Field( - proto.DOUBLE, - number=4, - ) - - -class InstanceGroupAutoscalingPolicyConfig(proto.Message): - r"""Configuration for the size bounds of an instance group, - including its proportional size to other groups. - - Attributes: - min_instances (int): - Optional. Minimum number of instances for this group. - - Primary workers - Bounds: [2, max_instances]. Default: 2. - Secondary workers - Bounds: [0, max_instances]. Default: 0. - max_instances (int): - Optional. Maximum number of instances for this group. - Required for primary workers. Note that by default, clusters - will not use secondary workers. Required for secondary - workers if the minimum secondary instances is set. - - Primary workers - Bounds: [min_instances, ). Required. - Secondary workers - Bounds: [min_instances, ). Default: 0. - weight (int): - Optional. Weight for the instance group, which is used to - determine the fraction of total workers in the cluster from - this instance group. For example, if primary workers have - weight 2, and secondary workers have weight 1, the cluster - will have approximately 2 primary workers for each secondary - worker. - - The cluster may not reach the specified balance if - constrained by min/max bounds or other autoscaling settings. - For example, if ``max_instances`` for secondary workers is - 0, then only primary workers will be added. The cluster can - also be out of balance when created. - - If weight is not set on any instance group, the cluster will - default to equal weight for all groups: the cluster will - attempt to maintain an equal number of workers in each group - within the configured size bounds for each group. If weight - is set for one group only, the cluster will default to zero - weight on the unset group. For example if weight is set only - on primary workers, the cluster will use primary workers - only and no secondary workers. - """ - - min_instances = proto.Field( - proto.INT32, - number=1, - ) - max_instances = proto.Field( - proto.INT32, - number=2, - ) - weight = proto.Field( - proto.INT32, - number=3, - ) - - -class CreateAutoscalingPolicyRequest(proto.Message): - r"""A request to create an autoscaling policy. - Attributes: - parent (str): - Required. The "resource name" of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.create``, the - resource name has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.create``, - the resource name has the following format: - ``projects/{project_id}/locations/{location}`` - policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): - Required. The autoscaling policy to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - policy = proto.Field( - proto.MESSAGE, - number=2, - message='AutoscalingPolicy', - ) - - -class GetAutoscalingPolicyRequest(proto.Message): - r"""A request to fetch an autoscaling policy. - Attributes: - name (str): - Required. The "resource name" of the autoscaling policy, as - described in - https://cloud.google.com/apis/design/resource_names. 
- - - For ``projects.regions.autoscalingPolicies.get``, the - resource name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.get``, the - resource name of the policy has the following format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class UpdateAutoscalingPolicyRequest(proto.Message): - r"""A request to update an autoscaling policy. - Attributes: - policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): - Required. The updated autoscaling policy. - """ - - policy = proto.Field( - proto.MESSAGE, - number=1, - message='AutoscalingPolicy', - ) - - -class DeleteAutoscalingPolicyRequest(proto.Message): - r"""A request to delete an autoscaling policy. - Autoscaling policies in use by one or more clusters will not be - deleted. - - Attributes: - name (str): - Required. The "resource name" of the autoscaling policy, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.delete``, the - resource name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.delete``, - the resource name of the policy has the following format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListAutoscalingPoliciesRequest(proto.Message): - r"""A request to list autoscaling policies in a project. - Attributes: - parent (str): - Required. The "resource name" of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.list``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.list``, the - resource name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - page_size (int): - Optional. The maximum number of results to - return in each response. Must be less than or - equal to 1000. Defaults to 100. - page_token (str): - Optional. The page token, returned by a - previous call, to request the next page of - results. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - - -class ListAutoscalingPoliciesResponse(proto.Message): - r"""A response to a request to list autoscaling policies in a - project. - - Attributes: - policies (Sequence[google.cloud.dataproc_v1beta2.types.AutoscalingPolicy]): - Output only. Autoscaling policies list. - next_page_token (str): - Output only. This token is included in the - response if there are more results to fetch. 
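The list request and response pair described here is normally consumed through the generated pager, which follows `next_page_token` automatically while `page_size` only caps each individual response. A small hypothetical sketch with a placeholder parent:

```python
# Hypothetical sketch: paging through autoscaling policies.
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.AutoscalingPolicyServiceClient()

request = dataproc_v1beta2.ListAutoscalingPoliciesRequest(
    parent="projects/my-project/regions/us-central1",
    page_size=100,  # must be <= 1000 per the field description
)

# The pager issues follow-up requests whenever next_page_token is set.
for policy in client.list_autoscaling_policies(request=request):
    print(policy.id, policy.name)
```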
- """ - - @property - def raw_page(self): - return self - - policies = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='AutoscalingPolicy', - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/clusters.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/clusters.py deleted file mode 100644 index d428148e..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/clusters.py +++ /dev/null @@ -1,1545 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.dataproc_v1beta2.types import shared -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1beta2', - manifest={ - 'Cluster', - 'ClusterConfig', - 'GkeClusterConfig', - 'EndpointConfig', - 'AutoscalingConfig', - 'EncryptionConfig', - 'GceClusterConfig', - 'InstanceGroupConfig', - 'ManagedGroupConfig', - 'AcceleratorConfig', - 'DiskConfig', - 'LifecycleConfig', - 'SecurityConfig', - 'KerberosConfig', - 'NodeInitializationAction', - 'ClusterStatus', - 'SoftwareConfig', - 'ClusterMetrics', - 'CreateClusterRequest', - 'UpdateClusterRequest', - 'DeleteClusterRequest', - 'GetClusterRequest', - 'ListClustersRequest', - 'ListClustersResponse', - 'DiagnoseClusterRequest', - 'DiagnoseClusterResults', - 'ReservationAffinity', - }, -) - - -class Cluster(proto.Message): - r"""Describes the identifying information, config, and status of - a cluster of Compute Engine instances. - - Attributes: - project_id (str): - Required. The Google Cloud Platform project - ID that the cluster belongs to. - cluster_name (str): - Required. The cluster name. Cluster names - within a project must be unique. Names of - deleted clusters can be reused. - config (google.cloud.dataproc_v1beta2.types.ClusterConfig): - Required. The cluster config. Note that - Dataproc may set default values, and values may - change when clusters are updated. - labels (Sequence[google.cloud.dataproc_v1beta2.types.Cluster.LabelsEntry]): - Optional. The labels to associate with this cluster. Label - **keys** must contain 1 to 63 characters, and must conform - to `RFC 1035 `__. - Label **values** may be empty, but, if present, must contain - 1 to 63 characters, and must conform to `RFC - 1035 `__. No more than - 32 labels can be associated with a cluster. - status (google.cloud.dataproc_v1beta2.types.ClusterStatus): - Output only. Cluster status. - status_history (Sequence[google.cloud.dataproc_v1beta2.types.ClusterStatus]): - Output only. The previous cluster status. - cluster_uuid (str): - Output only. A cluster UUID (Unique Universal - Identifier). Dataproc generates this value when - it creates the cluster. 
- metrics (google.cloud.dataproc_v1beta2.types.ClusterMetrics): - Output only. Contains cluster daemon metrics such as HDFS - and YARN stats. - - **Beta Feature**: This report is available for testing - purposes only. It may be changed before final release. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - config = proto.Field( - proto.MESSAGE, - number=3, - message='ClusterConfig', - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=8, - ) - status = proto.Field( - proto.MESSAGE, - number=4, - message='ClusterStatus', - ) - status_history = proto.RepeatedField( - proto.MESSAGE, - number=7, - message='ClusterStatus', - ) - cluster_uuid = proto.Field( - proto.STRING, - number=6, - ) - metrics = proto.Field( - proto.MESSAGE, - number=9, - message='ClusterMetrics', - ) - - -class ClusterConfig(proto.Message): - r"""The cluster config. - Attributes: - config_bucket (str): - Optional. A Cloud Storage bucket used to stage job - dependencies, config files, and job driver console output. - If you do not specify a staging bucket, Cloud Dataproc will - determine a Cloud Storage location (US, ASIA, or EU) for - your cluster's staging bucket according to the Compute - Engine zone where your cluster is deployed, and then create - and manage this project-level, per-location bucket (see - `Dataproc staging - bucket `__). - temp_bucket (str): - Optional. A Cloud Storage bucket used to - store ephemeral cluster and jobs data, such as - Spark and MapReduce history files. If you do not - specify a temp bucket, Dataproc will determine a - Cloud Storage location (US, ASIA, or EU) for - your cluster's temp bucket according to the - Compute Engine zone where your cluster is - deployed, and then create and manage this - project-level, per-location bucket. The default - bucket has a TTL of 90 days, but you can use any - TTL (or none) if you specify a bucket. - gce_cluster_config (google.cloud.dataproc_v1beta2.types.GceClusterConfig): - Optional. The shared Compute Engine config - settings for all instances in a cluster. - master_config (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig): - Optional. The Compute Engine config settings - for the master instance in a cluster. - worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig): - Optional. The Compute Engine config settings - for worker instances in a cluster. - secondary_worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig): - Optional. The Compute Engine config settings - for additional worker instances in a cluster. - software_config (google.cloud.dataproc_v1beta2.types.SoftwareConfig): - Optional. The config settings for software - inside the cluster. - lifecycle_config (google.cloud.dataproc_v1beta2.types.LifecycleConfig): - Optional. The config setting for auto delete - cluster schedule. - initialization_actions (Sequence[google.cloud.dataproc_v1beta2.types.NodeInitializationAction]): - Optional. Commands to execute on each node after config is - completed. By default, executables are run on master and all - worker nodes. You can test a node's role metadata to run an - executable on a master or worker node, as shown below using - ``curl`` (you can also use ``wget``): - - :: - - ROLE=$(curl -H Metadata-Flavor:Google - http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) - if [[ "${ROLE}" == 'Master' ]]; then - ... master specific actions ... - else - ... worker specific actions ... 
- fi - encryption_config (google.cloud.dataproc_v1beta2.types.EncryptionConfig): - Optional. Encryption settings for the - cluster. - autoscaling_config (google.cloud.dataproc_v1beta2.types.AutoscalingConfig): - Optional. Autoscaling config for the policy - associated with the cluster. Cluster does not - autoscale if this field is unset. - endpoint_config (google.cloud.dataproc_v1beta2.types.EndpointConfig): - Optional. Port/endpoint configuration for - this cluster - security_config (google.cloud.dataproc_v1beta2.types.SecurityConfig): - Optional. Security related configuration. - gke_cluster_config (google.cloud.dataproc_v1beta2.types.GkeClusterConfig): - Optional. The Kubernetes Engine config for Dataproc clusters - deployed to Kubernetes. Setting this is considered mutually - exclusive with Compute Engine-based options such as - ``gce_cluster_config``, ``master_config``, - ``worker_config``, ``secondary_worker_config``, and - ``autoscaling_config``. - """ - - config_bucket = proto.Field( - proto.STRING, - number=1, - ) - temp_bucket = proto.Field( - proto.STRING, - number=2, - ) - gce_cluster_config = proto.Field( - proto.MESSAGE, - number=8, - message='GceClusterConfig', - ) - master_config = proto.Field( - proto.MESSAGE, - number=9, - message='InstanceGroupConfig', - ) - worker_config = proto.Field( - proto.MESSAGE, - number=10, - message='InstanceGroupConfig', - ) - secondary_worker_config = proto.Field( - proto.MESSAGE, - number=12, - message='InstanceGroupConfig', - ) - software_config = proto.Field( - proto.MESSAGE, - number=13, - message='SoftwareConfig', - ) - lifecycle_config = proto.Field( - proto.MESSAGE, - number=14, - message='LifecycleConfig', - ) - initialization_actions = proto.RepeatedField( - proto.MESSAGE, - number=11, - message='NodeInitializationAction', - ) - encryption_config = proto.Field( - proto.MESSAGE, - number=15, - message='EncryptionConfig', - ) - autoscaling_config = proto.Field( - proto.MESSAGE, - number=16, - message='AutoscalingConfig', - ) - endpoint_config = proto.Field( - proto.MESSAGE, - number=17, - message='EndpointConfig', - ) - security_config = proto.Field( - proto.MESSAGE, - number=18, - message='SecurityConfig', - ) - gke_cluster_config = proto.Field( - proto.MESSAGE, - number=19, - message='GkeClusterConfig', - ) - - -class GkeClusterConfig(proto.Message): - r"""The GKE config for this cluster. - Attributes: - namespaced_gke_deployment_target (google.cloud.dataproc_v1beta2.types.GkeClusterConfig.NamespacedGkeDeploymentTarget): - Optional. A target for the deployment. - """ - - class NamespacedGkeDeploymentTarget(proto.Message): - r"""A full, namespace-isolated deployment target for an existing - GKE cluster. - - Attributes: - target_gke_cluster (str): - Optional. The target GKE cluster to deploy to. Format: - 'projects/{project}/locations/{location}/clusters/{cluster_id}' - cluster_namespace (str): - Optional. A namespace within the GKE cluster - to deploy into. - """ - - target_gke_cluster = proto.Field( - proto.STRING, - number=1, - ) - cluster_namespace = proto.Field( - proto.STRING, - number=2, - ) - - namespaced_gke_deployment_target = proto.Field( - proto.MESSAGE, - number=1, - message=NamespacedGkeDeploymentTarget, - ) - - -class EndpointConfig(proto.Message): - r"""Endpoint config for this cluster - Attributes: - http_ports (Sequence[google.cloud.dataproc_v1beta2.types.EndpointConfig.HttpPortsEntry]): - Output only. The map of port descriptions to URLs. Will only - be populated if enable_http_port_access is true. 
- enable_http_port_access (bool): - Optional. If true, enable http access to - specific ports on the cluster from external - sources. Defaults to false. - """ - - http_ports = proto.MapField( - proto.STRING, - proto.STRING, - number=1, - ) - enable_http_port_access = proto.Field( - proto.BOOL, - number=2, - ) - - -class AutoscalingConfig(proto.Message): - r"""Autoscaling Policy config associated with the cluster. - Attributes: - policy_uri (str): - Optional. The autoscaling policy used by the cluster. - - Only resource names including projectid and location - (region) are valid. Examples: - - - ``https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` - - ``projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` - - Note that the policy must be in the same project and - Dataproc region. - """ - - policy_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class EncryptionConfig(proto.Message): - r"""Encryption settings for the cluster. - Attributes: - gce_pd_kms_key_name (str): - Optional. The Cloud KMS key name to use for - PD disk encryption for all instances in the - cluster. - """ - - gce_pd_kms_key_name = proto.Field( - proto.STRING, - number=1, - ) - - -class GceClusterConfig(proto.Message): - r"""Common config settings for resources of Compute Engine - cluster instances, applicable to all instances in the cluster. - - Attributes: - zone_uri (str): - Optional. The zone where the Compute Engine cluster will be - located. On a create request, it is required in the "global" - region. If omitted in a non-global Dataproc region, the - service will pick a zone in the corresponding Compute Engine - region. On a get request, zone will always be present. - - A full URL, partial URI, or short name are valid. Examples: - - - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`` - - ``projects/[project_id]/zones/[zone]`` - - ``us-central1-f`` - network_uri (str): - Optional. The Compute Engine network to be used for machine - communications. Cannot be specified with subnetwork_uri. If - neither ``network_uri`` nor ``subnetwork_uri`` is specified, - the "default" network of the project is used, if it exists. - Cannot be a "Custom Subnet Network" (see `Using - Subnetworks `__ - for more information). - - A full URL, partial URI, or short name are valid. Examples: - - - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`` - - ``projects/[project_id]/regions/global/default`` - - ``default`` - subnetwork_uri (str): - Optional. The Compute Engine subnetwork to be used for - machine communications. Cannot be specified with - network_uri. - - A full URL, partial URI, or short name are valid. Examples: - - - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0`` - - ``projects/[project_id]/regions/us-east1/subnetworks/sub0`` - - ``sub0`` - internal_ip_only (bool): - Optional. If true, all instances in the cluster will only - have internal IP addresses. By default, clusters are not - restricted to internal IP addresses, and will have ephemeral - external IP addresses assigned to each instance. This - ``internal_ip_only`` restriction can only be enabled for - subnetwork enabled networks, and all off-cluster - dependencies must be configured to be accessible without - external IP addresses. - service_account (str): - Optional. 
The `Dataproc service - account `__ - (also see `VM Data Plane - identity `__) - used by Dataproc cluster VM instances to access Google Cloud - Platform services. - - If not specified, the `Compute Engine default service - account `__ - is used. - service_account_scopes (Sequence[str]): - Optional. The URIs of service account scopes to be included - in Compute Engine instances. The following base set of - scopes is always included: - - - https://www.googleapis.com/auth/cloud.useraccounts.readonly - - https://www.googleapis.com/auth/devstorage.read_write - - https://www.googleapis.com/auth/logging.write - - If no scopes are specified, the following defaults are also - provided: - - - https://www.googleapis.com/auth/bigquery - - https://www.googleapis.com/auth/bigtable.admin.table - - https://www.googleapis.com/auth/bigtable.data - - https://www.googleapis.com/auth/devstorage.full_control - tags (Sequence[str]): - The Compute Engine tags to add to all instances (see - `Tagging - instances `__). - metadata (Sequence[google.cloud.dataproc_v1beta2.types.GceClusterConfig.MetadataEntry]): - The Compute Engine metadata entries to add to all instances - (see `Project and instance - metadata `__). - reservation_affinity (google.cloud.dataproc_v1beta2.types.ReservationAffinity): - Optional. Reservation Affinity for consuming - Zonal reservation. - """ - - zone_uri = proto.Field( - proto.STRING, - number=1, - ) - network_uri = proto.Field( - proto.STRING, - number=2, - ) - subnetwork_uri = proto.Field( - proto.STRING, - number=6, - ) - internal_ip_only = proto.Field( - proto.BOOL, - number=7, - ) - service_account = proto.Field( - proto.STRING, - number=8, - ) - service_account_scopes = proto.RepeatedField( - proto.STRING, - number=3, - ) - tags = proto.RepeatedField( - proto.STRING, - number=4, - ) - metadata = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - reservation_affinity = proto.Field( - proto.MESSAGE, - number=11, - message='ReservationAffinity', - ) - - -class InstanceGroupConfig(proto.Message): - r"""The config settings for Compute Engine resources in - an instance group, such as a master or worker group. - - Attributes: - num_instances (int): - Optional. The number of VM instances in the - instance group. For master instance groups, must - be set to 1. - instance_names (Sequence[str]): - Output only. The list of instance names. Dataproc derives - the names from ``cluster_name``, ``num_instances``, and the - instance group. - image_uri (str): - Optional. The Compute Engine image resource used for cluster - instances. - - The URI can represent an image or image family. - - Image examples: - - - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`` - - ``projects/[project_id]/global/images/[image-id]`` - - ``image-id`` - - Image family examples. Dataproc will use the most recent - image from the family: - - - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`` - - ``projects/[project_id]/global/images/family/[custom-image-family-name]`` - - If the URI is unspecified, it will be inferred from - ``SoftwareConfig.image_version`` or the system default. - machine_type_uri (str): - Optional. The Compute Engine machine type used for cluster - instances. - - A full URL, partial URI, or short name are valid. 
Examples: - - - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` - - ``projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` - - ``n1-standard-2`` - - **Auto Zone Exception**: If you are using the Dataproc `Auto - Zone - Placement `__ - feature, you must use the short name of the machine type - resource, for example, ``n1-standard-2``. - disk_config (google.cloud.dataproc_v1beta2.types.DiskConfig): - Optional. Disk option config settings. - is_preemptible (bool): - Output only. Specifies that this instance - group contains preemptible instances. - preemptibility (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig.Preemptibility): - Optional. Specifies the preemptibility of the instance - group. - - The default value for master and worker groups is - ``NON_PREEMPTIBLE``. This default cannot be changed. - - The default value for secondary instances is - ``PREEMPTIBLE``. - managed_group_config (google.cloud.dataproc_v1beta2.types.ManagedGroupConfig): - Output only. The config for Compute Engine - Instance Group Manager that manages this group. - This is only used for preemptible instance - groups. - accelerators (Sequence[google.cloud.dataproc_v1beta2.types.AcceleratorConfig]): - Optional. The Compute Engine accelerator - configuration for these instances. - min_cpu_platform (str): - Specifies the minimum cpu platform for the Instance Group. - See `Dataproc -> Minimum CPU - Platform `__. - """ - class Preemptibility(proto.Enum): - r"""Controls the use of [preemptible instances] - (https://cloud.google.com/compute/docs/instances/preemptible) within - the group. - """ - PREEMPTIBILITY_UNSPECIFIED = 0 - NON_PREEMPTIBLE = 1 - PREEMPTIBLE = 2 - - num_instances = proto.Field( - proto.INT32, - number=1, - ) - instance_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - image_uri = proto.Field( - proto.STRING, - number=3, - ) - machine_type_uri = proto.Field( - proto.STRING, - number=4, - ) - disk_config = proto.Field( - proto.MESSAGE, - number=5, - message='DiskConfig', - ) - is_preemptible = proto.Field( - proto.BOOL, - number=6, - ) - preemptibility = proto.Field( - proto.ENUM, - number=10, - enum=Preemptibility, - ) - managed_group_config = proto.Field( - proto.MESSAGE, - number=7, - message='ManagedGroupConfig', - ) - accelerators = proto.RepeatedField( - proto.MESSAGE, - number=8, - message='AcceleratorConfig', - ) - min_cpu_platform = proto.Field( - proto.STRING, - number=9, - ) - - -class ManagedGroupConfig(proto.Message): - r"""Specifies the resources used to actively manage an instance - group. - - Attributes: - instance_template_name (str): - Output only. The name of the Instance - Template used for the Managed Instance Group. - instance_group_manager_name (str): - Output only. The name of the Instance Group - Manager for this group. - """ - - instance_template_name = proto.Field( - proto.STRING, - number=1, - ) - instance_group_manager_name = proto.Field( - proto.STRING, - number=2, - ) - - -class AcceleratorConfig(proto.Message): - r"""Specifies the type and number of accelerator cards attached to the - instances of an instance group (see `GPUs on Compute - Engine `__). - - Attributes: - accelerator_type_uri (str): - Full URL, partial URI, or short name of the accelerator type - resource to expose to this instance. 
See `Compute Engine - AcceleratorTypes `__ - - Examples - - - ``https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` - - ``projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` - - ``nvidia-tesla-k80`` - - **Auto Zone Exception**: If you are using the Dataproc `Auto - Zone - Placement `__ - feature, you must use the short name of the accelerator type - resource, for example, ``nvidia-tesla-k80``. - accelerator_count (int): - The number of the accelerator cards of this - type exposed to this instance. - """ - - accelerator_type_uri = proto.Field( - proto.STRING, - number=1, - ) - accelerator_count = proto.Field( - proto.INT32, - number=2, - ) - - -class DiskConfig(proto.Message): - r"""Specifies the config of disk options for a group of VM - instances. - - Attributes: - boot_disk_type (str): - Optional. Type of the boot disk (default is - "pd-standard"). Valid values: "pd-ssd" - (Persistent Disk Solid State Drive) or "pd- - standard" (Persistent Disk Hard Disk Drive). - boot_disk_size_gb (int): - Optional. Size in GB of the boot disk - (default is 500GB). - num_local_ssds (int): - Number of attached SSDs, from 0 to 4 (default is 0). If SSDs - are not attached, the boot disk is used to store runtime - logs and - `HDFS `__ - data. If one or more SSDs are attached, this runtime bulk - data is spread across them, and the boot disk contains only - basic config and installed binaries. - """ - - boot_disk_type = proto.Field( - proto.STRING, - number=3, - ) - boot_disk_size_gb = proto.Field( - proto.INT32, - number=1, - ) - num_local_ssds = proto.Field( - proto.INT32, - number=2, - ) - - -class LifecycleConfig(proto.Message): - r"""Specifies the cluster auto-delete schedule configuration. - Attributes: - idle_delete_ttl (google.protobuf.duration_pb2.Duration): - Optional. The duration to keep the cluster alive while - idling (when no jobs are running). Passing this threshold - will cause the cluster to be deleted. Minimum value is 10 - minutes; maximum value is 14 days (see JSON representation - of - `Duration `__. - auto_delete_time (google.protobuf.timestamp_pb2.Timestamp): - Optional. The time when cluster will be auto-deleted. (see - JSON representation of - `Timestamp `__). - auto_delete_ttl (google.protobuf.duration_pb2.Duration): - Optional. The lifetime duration of cluster. The cluster will - be auto-deleted at the end of this period. Minimum value is - 10 minutes; maximum value is 14 days (see JSON - representation of - `Duration `__). - idle_start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time when cluster became idle (most recent - job finished) and became eligible for deletion due to - idleness (see JSON representation of - `Timestamp `__). - """ - - idle_delete_ttl = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - auto_delete_time = proto.Field( - proto.MESSAGE, - number=2, - oneof='ttl', - message=timestamp_pb2.Timestamp, - ) - auto_delete_ttl = proto.Field( - proto.MESSAGE, - number=3, - oneof='ttl', - message=duration_pb2.Duration, - ) - idle_start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class SecurityConfig(proto.Message): - r"""Security related configuration, including encryption, - Kerberos, etc. - - Attributes: - kerberos_config (google.cloud.dataproc_v1beta2.types.KerberosConfig): - Kerberos related configuration. 
- """ - - kerberos_config = proto.Field( - proto.MESSAGE, - number=1, - message='KerberosConfig', - ) - - -class KerberosConfig(proto.Message): - r"""Specifies Kerberos related configuration. - Attributes: - enable_kerberos (bool): - Optional. Flag to indicate whether to - Kerberize the cluster (default: false). Set this - field to true to enable Kerberos on a cluster. - root_principal_password_uri (str): - Required. The Cloud Storage URI of a KMS - encrypted file containing the root principal - password. - kms_key_uri (str): - Required. The uri of the KMS key used to - encrypt various sensitive files. - keystore_uri (str): - Optional. The Cloud Storage URI of the - keystore file used for SSL encryption. If not - provided, Dataproc will provide a self-signed - certificate. - truststore_uri (str): - Optional. The Cloud Storage URI of the - truststore file used for SSL encryption. If not - provided, Dataproc will provide a self-signed - certificate. - keystore_password_uri (str): - Optional. The Cloud Storage URI of a KMS - encrypted file containing the password to the - user provided keystore. For the self-signed - certificate, this password is generated by - Dataproc. - key_password_uri (str): - Optional. The Cloud Storage URI of a KMS - encrypted file containing the password to the - user provided key. For the self-signed - certificate, this password is generated by - Dataproc. - truststore_password_uri (str): - Optional. The Cloud Storage URI of a KMS - encrypted file containing the password to the - user provided truststore. For the self-signed - certificate, this password is generated by - Dataproc. - cross_realm_trust_realm (str): - Optional. The remote realm the Dataproc on- - luster KDC will trust, should the user enable - cross realm trust. - cross_realm_trust_kdc (str): - Optional. The KDC (IP or hostname) for the - remote trusted realm in a cross realm trust - relationship. - cross_realm_trust_admin_server (str): - Optional. The admin server (IP or hostname) - for the remote trusted realm in a cross realm - trust relationship. - cross_realm_trust_shared_password_uri (str): - Optional. The Cloud Storage URI of a KMS - encrypted file containing the shared password - between the on-cluster Kerberos realm and the - remote trusted realm, in a cross realm trust - relationship. - kdc_db_key_uri (str): - Optional. The Cloud Storage URI of a KMS - encrypted file containing the master key of the - KDC database. - tgt_lifetime_hours (int): - Optional. The lifetime of the ticket granting - ticket, in hours. If not specified, or user - specifies 0, then default value 10 will be used. - realm (str): - Optional. The name of the on-cluster Kerberos - realm. If not specified, the uppercased domain - of hostnames will be the realm. 
- """ - - enable_kerberos = proto.Field( - proto.BOOL, - number=1, - ) - root_principal_password_uri = proto.Field( - proto.STRING, - number=2, - ) - kms_key_uri = proto.Field( - proto.STRING, - number=3, - ) - keystore_uri = proto.Field( - proto.STRING, - number=4, - ) - truststore_uri = proto.Field( - proto.STRING, - number=5, - ) - keystore_password_uri = proto.Field( - proto.STRING, - number=6, - ) - key_password_uri = proto.Field( - proto.STRING, - number=7, - ) - truststore_password_uri = proto.Field( - proto.STRING, - number=8, - ) - cross_realm_trust_realm = proto.Field( - proto.STRING, - number=9, - ) - cross_realm_trust_kdc = proto.Field( - proto.STRING, - number=10, - ) - cross_realm_trust_admin_server = proto.Field( - proto.STRING, - number=11, - ) - cross_realm_trust_shared_password_uri = proto.Field( - proto.STRING, - number=12, - ) - kdc_db_key_uri = proto.Field( - proto.STRING, - number=13, - ) - tgt_lifetime_hours = proto.Field( - proto.INT32, - number=14, - ) - realm = proto.Field( - proto.STRING, - number=15, - ) - - -class NodeInitializationAction(proto.Message): - r"""Specifies an executable to run on a fully configured node and - a timeout period for executable completion. - - Attributes: - executable_file (str): - Required. Cloud Storage URI of executable - file. - execution_timeout (google.protobuf.duration_pb2.Duration): - Optional. Amount of time executable has to complete. Default - is 10 minutes (see JSON representation of - `Duration `__). - - Cluster creation fails with an explanatory error message - (the name of the executable that caused the error and the - exceeded timeout period) if the executable is not completed - at end of the timeout period. - """ - - executable_file = proto.Field( - proto.STRING, - number=1, - ) - execution_timeout = proto.Field( - proto.MESSAGE, - number=2, - message=duration_pb2.Duration, - ) - - -class ClusterStatus(proto.Message): - r"""The status of a cluster and its instances. - Attributes: - state (google.cloud.dataproc_v1beta2.types.ClusterStatus.State): - Output only. The cluster's state. - detail (str): - Output only. Optional details of cluster's - state. - state_start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when this state was entered (see JSON - representation of - `Timestamp `__). - substate (google.cloud.dataproc_v1beta2.types.ClusterStatus.Substate): - Output only. Additional state information - that includes status reported by the agent. - """ - class State(proto.Enum): - r"""The cluster state.""" - UNKNOWN = 0 - CREATING = 1 - RUNNING = 2 - ERROR = 3 - DELETING = 4 - UPDATING = 5 - STOPPING = 6 - STOPPED = 7 - STARTING = 8 - - class Substate(proto.Enum): - r"""The cluster substate.""" - UNSPECIFIED = 0 - UNHEALTHY = 1 - STALE_STATUS = 2 - - state = proto.Field( - proto.ENUM, - number=1, - enum=State, - ) - detail = proto.Field( - proto.STRING, - number=2, - ) - state_start_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - substate = proto.Field( - proto.ENUM, - number=4, - enum=Substate, - ) - - -class SoftwareConfig(proto.Message): - r"""Specifies the selection and config of software inside the - cluster. - - Attributes: - image_version (str): - Optional. The version of software inside the cluster. It - must be one of the supported `Dataproc - Versions `__, - such as "1.2" (including a subminor version, such as - "1.2.29"), or the `"preview" - version `__. - If unspecified, it defaults to the latest Debian version. 
- properties (Sequence[google.cloud.dataproc_v1beta2.types.SoftwareConfig.PropertiesEntry]): - Optional. The properties to set on daemon config files. - - Property keys are specified in ``prefix:property`` format, - for example ``core:hadoop.tmp.dir``. The following are - supported prefixes and their mappings: - - - capacity-scheduler: ``capacity-scheduler.xml`` - - core: ``core-site.xml`` - - distcp: ``distcp-default.xml`` - - hdfs: ``hdfs-site.xml`` - - hive: ``hive-site.xml`` - - mapred: ``mapred-site.xml`` - - pig: ``pig.properties`` - - spark: ``spark-defaults.conf`` - - yarn: ``yarn-site.xml`` - - For more information, see `Cluster - properties `__. - optional_components (Sequence[google.cloud.dataproc_v1beta2.types.Component]): - The set of optional components to activate on - the cluster. - """ - - image_version = proto.Field( - proto.STRING, - number=1, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - optional_components = proto.RepeatedField( - proto.ENUM, - number=3, - enum=shared.Component, - ) - - -class ClusterMetrics(proto.Message): - r"""Contains cluster daemon metrics, such as HDFS and YARN stats. - - **Beta Feature**: This report is available for testing purposes - only. It may be changed before final release. - - Attributes: - hdfs_metrics (Sequence[google.cloud.dataproc_v1beta2.types.ClusterMetrics.HdfsMetricsEntry]): - The HDFS metrics. - yarn_metrics (Sequence[google.cloud.dataproc_v1beta2.types.ClusterMetrics.YarnMetricsEntry]): - The YARN metrics. - """ - - hdfs_metrics = proto.MapField( - proto.STRING, - proto.INT64, - number=1, - ) - yarn_metrics = proto.MapField( - proto.STRING, - proto.INT64, - number=2, - ) - - -class CreateClusterRequest(proto.Message): - r"""A request to create a cluster. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster (google.cloud.dataproc_v1beta2.types.Cluster): - Required. The cluster to create. - request_id (str): - Optional. A unique id used to identify the request. If the - server receives two - [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] - requests with the same id, then the second request will be - ignored and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - cluster = proto.Field( - proto.MESSAGE, - number=2, - message='Cluster', - ) - request_id = proto.Field( - proto.STRING, - number=4, - ) - - -class UpdateClusterRequest(proto.Message): - r"""A request to update a cluster. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster_name (str): - Required. The cluster name. - cluster (google.cloud.dataproc_v1beta2.types.Cluster): - Required. The changes to the cluster. - graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): - Optional. Timeout for graceful YARN decomissioning. 
Graceful - decommissioning allows removing nodes from the cluster - without interrupting jobs in progress. Timeout specifies how - long to wait for jobs in progress to finish before - forcefully removing nodes (and potentially interrupting - jobs). Default timeout is 0 (for forceful decommission), and - the maximum allowed timeout is 1 day (see JSON - representation of - `Duration `__). - - Only supported on Dataproc image versions 1.2 and higher. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Specifies the path, relative to ``Cluster``, of - the field to update. For example, to change the number of - workers in a cluster to 5, the ``update_mask`` parameter - would be specified as - ``config.worker_config.num_instances``, and the ``PATCH`` - request body would specify the new value, as follows: - - :: - - { - "config":{ - "workerConfig":{ - "numInstances":"5" - } - } - } - - Similarly, to change the number of preemptible workers in a - cluster to 5, the ``update_mask`` parameter would be - ``config.secondary_worker_config.num_instances``, and the - ``PATCH`` request body would be set as follows: - - :: - - { - "config":{ - "secondaryWorkerConfig":{ - "numInstances":"5" - } - } - } - - Note: currently only the following fields can be updated: - - .. raw:: html - - - - - - - - - - - - - - - - - - - - - - - - - - -
-                 Mask                                           Purpose
-                 labels                                         Updates labels
-                 config.worker_config.num_instances             Resize primary worker group
-                 config.secondary_worker_config.num_instances   Resize secondary worker group
-                 config.lifecycle_config.auto_delete_ttl        Reset MAX TTL duration
-                 config.lifecycle_config.auto_delete_time       Update MAX TTL deletion timestamp
-                 config.lifecycle_config.idle_delete_ttl        Update Idle TTL duration
-                 config.autoscaling_config.policy_uri           Use, stop using, or change autoscaling policies
- request_id (str): - Optional. A unique id used to identify the request. If the - server receives two - [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] - requests with the same id, then the second request will be - ignored and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=5, - ) - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - cluster = proto.Field( - proto.MESSAGE, - number=3, - message='Cluster', - ) - graceful_decommission_timeout = proto.Field( - proto.MESSAGE, - number=6, - message=duration_pb2.Duration, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=4, - message=field_mask_pb2.FieldMask, - ) - request_id = proto.Field( - proto.STRING, - number=7, - ) - - -class DeleteClusterRequest(proto.Message): - r"""A request to delete a cluster. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster_name (str): - Required. The cluster name. - cluster_uuid (str): - Optional. Specifying the ``cluster_uuid`` means the RPC - should fail (with error NOT_FOUND) if cluster with specified - UUID does not exist. - request_id (str): - Optional. A unique id used to identify the request. If the - server receives two - [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] - requests with the same id, then the second request will be - ignored and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - cluster_uuid = proto.Field( - proto.STRING, - number=4, - ) - request_id = proto.Field( - proto.STRING, - number=5, - ) - - -class GetClusterRequest(proto.Message): - r"""Request to get the resource representation for a cluster in a - project. - - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster_name (str): - Required. The cluster name. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - - -class ListClustersRequest(proto.Message): - r"""A request to list the clusters in a project. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - filter (str): - Optional. A filter constraining the clusters to list. 
- Filters are case-sensitive and have the following syntax: - - field = value [AND [field = value]] ... - - where **field** is one of ``status.state``, ``clusterName``, - or ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** - can be ``*`` to match all values. ``status.state`` can be - one of the following: ``ACTIVE``, ``INACTIVE``, - ``CREATING``, ``RUNNING``, ``ERROR``, ``DELETING``, or - ``UPDATING``. ``ACTIVE`` contains the ``CREATING``, - ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains - the ``DELETING`` and ``ERROR`` states. ``clusterName`` is - the name of the cluster provided at creation time. Only the - logical ``AND`` operator is supported; space-separated items - are treated as having an implicit ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND clusterName = mycluster AND - labels.env = staging AND labels.starred = \* - page_size (int): - Optional. The standard List page size. - page_token (str): - Optional. The standard List page token. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=4, - ) - filter = proto.Field( - proto.STRING, - number=5, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - - -class ListClustersResponse(proto.Message): - r"""The list of all clusters in a project. - Attributes: - clusters (Sequence[google.cloud.dataproc_v1beta2.types.Cluster]): - Output only. The clusters in the project. - next_page_token (str): - Output only. This token is included in the response if there - are more results to fetch. To fetch additional results, - provide this value as the ``page_token`` in a subsequent - ListClustersRequest. - """ - - @property - def raw_page(self): - return self - - clusters = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Cluster', - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DiagnoseClusterRequest(proto.Message): - r"""A request to collect cluster diagnostic information. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the cluster belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - cluster_name (str): - Required. The cluster name. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - - -class DiagnoseClusterResults(proto.Message): - r"""The location of diagnostic output. - Attributes: - output_uri (str): - Output only. The Cloud Storage URI of the - diagnostic output. The output report is a plain - text file with a summary of collected - diagnostics. - """ - - output_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class ReservationAffinity(proto.Message): - r"""Reservation Affinity for consuming Zonal reservation. - Attributes: - consume_reservation_type (google.cloud.dataproc_v1beta2.types.ReservationAffinity.Type): - Optional. Type of reservation to consume - key (str): - Optional. Corresponds to the label key of - reservation resource. - values (Sequence[str]): - Optional. Corresponds to the label values of - reservation resource. - """ - class Type(proto.Enum): - r"""Indicates whether to consume capacity from an reservation or - not. 
- """ - TYPE_UNSPECIFIED = 0 - NO_RESERVATION = 1 - ANY_RESERVATION = 2 - SPECIFIC_RESERVATION = 3 - - consume_reservation_type = proto.Field( - proto.ENUM, - number=1, - enum=Type, - ) - key = proto.Field( - proto.STRING, - number=2, - ) - values = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/jobs.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/jobs.py deleted file mode 100644 index 6c736db5..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/jobs.py +++ /dev/null @@ -1,1364 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1beta2', - manifest={ - 'LoggingConfig', - 'HadoopJob', - 'SparkJob', - 'PySparkJob', - 'QueryList', - 'HiveJob', - 'SparkSqlJob', - 'PigJob', - 'SparkRJob', - 'PrestoJob', - 'JobPlacement', - 'JobStatus', - 'JobReference', - 'YarnApplication', - 'Job', - 'JobScheduling', - 'JobMetadata', - 'SubmitJobRequest', - 'GetJobRequest', - 'ListJobsRequest', - 'UpdateJobRequest', - 'ListJobsResponse', - 'CancelJobRequest', - 'DeleteJobRequest', - }, -) - - -class LoggingConfig(proto.Message): - r"""The runtime logging config of the job. - Attributes: - driver_log_levels (Sequence[google.cloud.dataproc_v1beta2.types.LoggingConfig.DriverLogLevelsEntry]): - The per-package log levels for the driver. - This may include "root" package name to - configure rootLogger. Examples: - 'com.google = FATAL', 'root = INFO', - 'org.apache = DEBUG' - """ - class Level(proto.Enum): - r"""The Log4j level for job execution. When running an `Apache - Hive `__ job, Cloud Dataproc configures the - Hive client to an equivalent verbosity level. - """ - LEVEL_UNSPECIFIED = 0 - ALL = 1 - TRACE = 2 - DEBUG = 3 - INFO = 4 - WARN = 5 - ERROR = 6 - FATAL = 7 - OFF = 8 - - driver_log_levels = proto.MapField( - proto.STRING, - proto.ENUM, - number=2, - enum=Level, - ) - - -class HadoopJob(proto.Message): - r"""A Dataproc job for running `Apache Hadoop - MapReduce `__ - jobs on `Apache Hadoop - YARN `__. - - Attributes: - main_jar_file_uri (str): - The HCFS URI of the jar file containing the - main class. Examples: - 'gs://foo-bucket/analytics-binaries/extract- - useful-metrics-mr.jar' 'hdfs:/tmp/test- - samples/custom-wordcount.jar' - 'file:///home/usr/lib/hadoop-mapreduce/hadoop- - mapreduce-examples.jar' - main_class (str): - The name of the driver's main class. The jar file containing - the class must be in the default CLASSPATH or specified in - ``jar_file_uris``. - args (Sequence[str]): - Optional. The arguments to pass to the driver. 
Do not - include arguments, such as ``-libjars`` or ``-Dfoo=bar``, - that can be set as job properties, since a collision may - occur that causes an incorrect job submission. - jar_file_uris (Sequence[str]): - Optional. Jar file URIs to add to the - CLASSPATHs of the Hadoop driver and tasks. - file_uris (Sequence[str]): - Optional. HCFS (Hadoop Compatible Filesystem) - URIs of files to be copied to the working - directory of Hadoop drivers and distributed - tasks. Useful for naively parallel tasks. - archive_uris (Sequence[str]): - Optional. HCFS URIs of archives to be - extracted in the working directory of Hadoop - drivers and tasks. Supported file types: .jar, - .tar, .tar.gz, .tgz, or .zip. - properties (Sequence[google.cloud.dataproc_v1beta2.types.HadoopJob.PropertiesEntry]): - Optional. A mapping of property names to values, used to - configure Hadoop. Properties that conflict with values set - by the Dataproc API may be overwritten. Can include - properties set in /etc/hadoop/conf/*-site and classes in - user code. - logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): - Optional. The runtime log config for job - execution. - """ - - main_jar_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='driver', - ) - main_class = proto.Field( - proto.STRING, - number=2, - oneof='driver', - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=4, - ) - file_uris = proto.RepeatedField( - proto.STRING, - number=5, - ) - archive_uris = proto.RepeatedField( - proto.STRING, - number=6, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=8, - message='LoggingConfig', - ) - - -class SparkJob(proto.Message): - r"""A Dataproc job for running `Apache - Spark `__ applications on YARN. The - specification of the main method to call to drive the job. Specify - either the jar file that contains the main class or the main class - name. To pass both a main jar and a main class in that jar, add the - jar to ``CommonJob.jar_file_uris``, and then specify the main class - name in ``main_class``. - - Attributes: - main_jar_file_uri (str): - The HCFS URI of the jar file that contains - the main class. - main_class (str): - The name of the driver's main class. The jar file that - contains the class must be in the default CLASSPATH or - specified in ``jar_file_uris``. - args (Sequence[str]): - Optional. The arguments to pass to the driver. Do not - include arguments, such as ``--conf``, that can be set as - job properties, since a collision may occur that causes an - incorrect job submission. - jar_file_uris (Sequence[str]): - Optional. HCFS URIs of jar files to add to - the CLASSPATHs of the Spark driver and tasks. - file_uris (Sequence[str]): - Optional. HCFS URIs of files to be placed in - the working directory of each executor. Useful - for naively parallel tasks. - archive_uris (Sequence[str]): - Optional. HCFS URIs of archives to be - extracted into the working directory of each - executor. Supported file types: .jar, .tar, - .tar.gz, .tgz, and .zip. - properties (Sequence[google.cloud.dataproc_v1beta2.types.SparkJob.PropertiesEntry]): - Optional. A mapping of property names to - values, used to configure Spark. Properties that - conflict with values set by the Dataproc API may - be overwritten. Can include properties set in - /etc/spark/conf/spark-defaults.conf and classes - in user code. 
- logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): - Optional. The runtime log config for job - execution. - """ - - main_jar_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='driver', - ) - main_class = proto.Field( - proto.STRING, - number=2, - oneof='driver', - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=4, - ) - file_uris = proto.RepeatedField( - proto.STRING, - number=5, - ) - archive_uris = proto.RepeatedField( - proto.STRING, - number=6, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=8, - message='LoggingConfig', - ) - - -class PySparkJob(proto.Message): - r"""A Dataproc job for running `Apache - PySpark `__ - applications on YARN. - - Attributes: - main_python_file_uri (str): - Required. The HCFS URI of the main Python - file to use as the driver. Must be a .py file. - args (Sequence[str]): - Optional. The arguments to pass to the driver. Do not - include arguments, such as ``--conf``, that can be set as - job properties, since a collision may occur that causes an - incorrect job submission. - python_file_uris (Sequence[str]): - Optional. HCFS file URIs of Python files to - pass to the PySpark framework. Supported file - types: .py, .egg, and .zip. - jar_file_uris (Sequence[str]): - Optional. HCFS URIs of jar files to add to - the CLASSPATHs of the Python driver and tasks. - file_uris (Sequence[str]): - Optional. HCFS URIs of files to be placed in - the working directory of each executor. Useful - for naively parallel tasks. - archive_uris (Sequence[str]): - Optional. HCFS URIs of archives to be - extracted into the working directory of each - executor. Supported file types: .jar, .tar, - .tar.gz, .tgz, and .zip. - properties (Sequence[google.cloud.dataproc_v1beta2.types.PySparkJob.PropertiesEntry]): - Optional. A mapping of property names to - values, used to configure PySpark. Properties - that conflict with values set by the Dataproc - API may be overwritten. Can include properties - set in - /etc/spark/conf/spark-defaults.conf and classes - in user code. - logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): - Optional. The runtime log config for job - execution. - """ - - main_python_file_uri = proto.Field( - proto.STRING, - number=1, - ) - args = proto.RepeatedField( - proto.STRING, - number=2, - ) - python_file_uris = proto.RepeatedField( - proto.STRING, - number=3, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=4, - ) - file_uris = proto.RepeatedField( - proto.STRING, - number=5, - ) - archive_uris = proto.RepeatedField( - proto.STRING, - number=6, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=8, - message='LoggingConfig', - ) - - -class QueryList(proto.Message): - r"""A list of queries to run on a cluster. - Attributes: - queries (Sequence[str]): - Required. The queries to execute. You do not need to - terminate a query with a semicolon. Multiple queries can be - specified in one string by separating each with a semicolon. 
- Here is an example of an Cloud Dataproc API snippet that - uses a QueryList to specify a HiveJob: - - :: - - "hiveJob": { - "queryList": { - "queries": [ - "query1", - "query2", - "query3;query4", - ] - } - } - """ - - queries = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class HiveJob(proto.Message): - r"""A Dataproc job for running `Apache - Hive `__ queries on YARN. - - Attributes: - query_file_uri (str): - The HCFS URI of the script that contains Hive - queries. - query_list (google.cloud.dataproc_v1beta2.types.QueryList): - A list of queries. - continue_on_failure (bool): - Optional. Whether to continue executing queries if a query - fails. The default value is ``false``. Setting to ``true`` - can be useful when executing independent parallel queries. - script_variables (Sequence[google.cloud.dataproc_v1beta2.types.HiveJob.ScriptVariablesEntry]): - Optional. Mapping of query variable names to values - (equivalent to the Hive command: ``SET name="value";``). - properties (Sequence[google.cloud.dataproc_v1beta2.types.HiveJob.PropertiesEntry]): - Optional. A mapping of property names and values, used to - configure Hive. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in /etc/hadoop/conf/*-site.xml, - /etc/hive/conf/hive-site.xml, and classes in user code. - jar_file_uris (Sequence[str]): - Optional. HCFS URIs of jar files to add to - the CLASSPATH of the Hive server and Hadoop - MapReduce (MR) tasks. Can contain Hive SerDes - and UDFs. - """ - - query_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='queries', - ) - query_list = proto.Field( - proto.MESSAGE, - number=2, - oneof='queries', - message='QueryList', - ) - continue_on_failure = proto.Field( - proto.BOOL, - number=3, - ) - script_variables = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=6, - ) - - -class SparkSqlJob(proto.Message): - r"""A Dataproc job for running `Apache Spark - SQL `__ queries. - - Attributes: - query_file_uri (str): - The HCFS URI of the script that contains SQL - queries. - query_list (google.cloud.dataproc_v1beta2.types.QueryList): - A list of queries. - script_variables (Sequence[google.cloud.dataproc_v1beta2.types.SparkSqlJob.ScriptVariablesEntry]): - Optional. Mapping of query variable names to values - (equivalent to the Spark SQL command: SET - ``name="value";``). - properties (Sequence[google.cloud.dataproc_v1beta2.types.SparkSqlJob.PropertiesEntry]): - Optional. A mapping of property names to - values, used to configure Spark SQL's SparkConf. - Properties that conflict with values set by the - Dataproc API may be overwritten. - jar_file_uris (Sequence[str]): - Optional. HCFS URIs of jar files to be added - to the Spark CLASSPATH. - logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): - Optional. The runtime log config for job - execution. 
- """ - - query_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='queries', - ) - query_list = proto.Field( - proto.MESSAGE, - number=2, - oneof='queries', - message='QueryList', - ) - script_variables = proto.MapField( - proto.STRING, - proto.STRING, - number=3, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=56, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=6, - message='LoggingConfig', - ) - - -class PigJob(proto.Message): - r"""A Dataproc job for running `Apache Pig `__ - queries on YARN. - - Attributes: - query_file_uri (str): - The HCFS URI of the script that contains the - Pig queries. - query_list (google.cloud.dataproc_v1beta2.types.QueryList): - A list of queries. - continue_on_failure (bool): - Optional. Whether to continue executing queries if a query - fails. The default value is ``false``. Setting to ``true`` - can be useful when executing independent parallel queries. - script_variables (Sequence[google.cloud.dataproc_v1beta2.types.PigJob.ScriptVariablesEntry]): - Optional. Mapping of query variable names to values - (equivalent to the Pig command: ``name=[value]``). - properties (Sequence[google.cloud.dataproc_v1beta2.types.PigJob.PropertiesEntry]): - Optional. A mapping of property names to values, used to - configure Pig. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in /etc/hadoop/conf/*-site.xml, - /etc/pig/conf/pig.properties, and classes in user code. - jar_file_uris (Sequence[str]): - Optional. HCFS URIs of jar files to add to - the CLASSPATH of the Pig Client and Hadoop - MapReduce (MR) tasks. Can contain Pig UDFs. - logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): - Optional. The runtime log config for job - execution. - """ - - query_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='queries', - ) - query_list = proto.Field( - proto.MESSAGE, - number=2, - oneof='queries', - message='QueryList', - ) - continue_on_failure = proto.Field( - proto.BOOL, - number=3, - ) - script_variables = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - jar_file_uris = proto.RepeatedField( - proto.STRING, - number=6, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=7, - message='LoggingConfig', - ) - - -class SparkRJob(proto.Message): - r"""A Dataproc job for running `Apache - SparkR `__ - applications on YARN. - - Attributes: - main_r_file_uri (str): - Required. The HCFS URI of the main R file to - use as the driver. Must be a .R file. - args (Sequence[str]): - Optional. The arguments to pass to the driver. Do not - include arguments, such as ``--conf``, that can be set as - job properties, since a collision may occur that causes an - incorrect job submission. - file_uris (Sequence[str]): - Optional. HCFS URIs of files to be placed in - the working directory of each executor. Useful - for naively parallel tasks. - archive_uris (Sequence[str]): - Optional. HCFS URIs of archives to be - extracted into the working directory of each - executor. Supported file types: .jar, .tar, - .tar.gz, .tgz, and .zip. - properties (Sequence[google.cloud.dataproc_v1beta2.types.SparkRJob.PropertiesEntry]): - Optional. A mapping of property names to - values, used to configure SparkR. Properties - that conflict with values set by the Dataproc - API may be overwritten. 
Can include properties - set in - /etc/spark/conf/spark-defaults.conf and classes - in user code. - logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): - Optional. The runtime log config for job - execution. - """ - - main_r_file_uri = proto.Field( - proto.STRING, - number=1, - ) - args = proto.RepeatedField( - proto.STRING, - number=2, - ) - file_uris = proto.RepeatedField( - proto.STRING, - number=3, - ) - archive_uris = proto.RepeatedField( - proto.STRING, - number=4, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=6, - message='LoggingConfig', - ) - - -class PrestoJob(proto.Message): - r"""A Dataproc job for running `Presto `__ - queries. **IMPORTANT**: The `Dataproc Presto Optional - Component `__ - must be enabled when the cluster is created to submit a Presto job - to the cluster. - - Attributes: - query_file_uri (str): - The HCFS URI of the script that contains SQL - queries. - query_list (google.cloud.dataproc_v1beta2.types.QueryList): - A list of queries. - continue_on_failure (bool): - Optional. Whether to continue executing queries if a query - fails. The default value is ``false``. Setting to ``true`` - can be useful when executing independent parallel queries. - output_format (str): - Optional. The format in which query output - will be displayed. See the Presto documentation - for supported output formats - client_tags (Sequence[str]): - Optional. Presto client tags to attach to - this query - properties (Sequence[google.cloud.dataproc_v1beta2.types.PrestoJob.PropertiesEntry]): - Optional. A mapping of property names to values. Used to set - Presto `session - properties `__ - Equivalent to using the --session flag in the Presto CLI - logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): - Optional. The runtime log config for job - execution. - """ - - query_file_uri = proto.Field( - proto.STRING, - number=1, - oneof='queries', - ) - query_list = proto.Field( - proto.MESSAGE, - number=2, - oneof='queries', - message='QueryList', - ) - continue_on_failure = proto.Field( - proto.BOOL, - number=3, - ) - output_format = proto.Field( - proto.STRING, - number=4, - ) - client_tags = proto.RepeatedField( - proto.STRING, - number=5, - ) - properties = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - logging_config = proto.Field( - proto.MESSAGE, - number=7, - message='LoggingConfig', - ) - - -class JobPlacement(proto.Message): - r"""Dataproc job config. - Attributes: - cluster_name (str): - Required. The name of the cluster where the - job will be submitted. - cluster_uuid (str): - Output only. A cluster UUID generated by the - Dataproc service when the job is submitted. - """ - - cluster_name = proto.Field( - proto.STRING, - number=1, - ) - cluster_uuid = proto.Field( - proto.STRING, - number=2, - ) - - -class JobStatus(proto.Message): - r"""Dataproc job status. - Attributes: - state (google.cloud.dataproc_v1beta2.types.JobStatus.State): - Output only. A state message specifying the - overall job state. - details (str): - Output only. Optional Job state details, such - as an error description if the state is - ERROR. - state_start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time when this state was - entered. - substate (google.cloud.dataproc_v1beta2.types.JobStatus.Substate): - Output only. Additional state information, - which includes status reported by the agent. 
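A hedged sketch of a Presto job assembled from the messages above; the target cluster must have the Presto optional component enabled, and the property and tag values are purely illustrative::

    from google.cloud import dataproc_v1beta2

    presto_job = dataproc_v1beta2.PrestoJob(
        query_list=dataproc_v1beta2.QueryList(queries=["SELECT 1"]),
        output_format="CSV",                 # see the Presto docs for supported formats
        client_tags=["nightly-report"],      # hypothetical tag, for illustration only
        properties={"query_max_run_time": "30m"},  # example Presto session property
    )
    placement = dataproc_v1beta2.JobPlacement(cluster_name="my-presto-cluster")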
- """ - class State(proto.Enum): - r"""The job state.""" - STATE_UNSPECIFIED = 0 - PENDING = 1 - SETUP_DONE = 8 - RUNNING = 2 - CANCEL_PENDING = 3 - CANCEL_STARTED = 7 - CANCELLED = 4 - DONE = 5 - ERROR = 6 - ATTEMPT_FAILURE = 9 - - class Substate(proto.Enum): - r"""The job substate.""" - UNSPECIFIED = 0 - SUBMITTED = 1 - QUEUED = 2 - STALE_STATUS = 3 - - state = proto.Field( - proto.ENUM, - number=1, - enum=State, - ) - details = proto.Field( - proto.STRING, - number=2, - ) - state_start_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - substate = proto.Field( - proto.ENUM, - number=7, - enum=Substate, - ) - - -class JobReference(proto.Message): - r"""Encapsulates the full scoping used to reference a job. - Attributes: - project_id (str): - Optional. The ID of the Google Cloud Platform - project that the job belongs to. If specified, - must match the request project ID. - job_id (str): - Optional. The job ID, which must be unique within the - project. The ID must contain only letters (a-z, A-Z), - numbers (0-9), underscores (_), or hyphens (-). The maximum - length is 100 characters. - - If not specified by the caller, the job ID will be provided - by the server. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - job_id = proto.Field( - proto.STRING, - number=2, - ) - - -class YarnApplication(proto.Message): - r"""A YARN application created by a job. Application information is a - subset of - org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. - - **Beta Feature**: This report is available for testing purposes - only. It may be changed before final release. - - Attributes: - name (str): - Output only. The application name. - state (google.cloud.dataproc_v1beta2.types.YarnApplication.State): - Output only. The application state. - progress (float): - Output only. The numerical progress of the - application, from 1 to 100. - tracking_url (str): - Output only. The HTTP URL of the - ApplicationMaster, HistoryServer, or - TimelineServer that provides application- - specific information. The URL uses the internal - hostname, and requires a proxy server for - resolution and, possibly, access. - """ - class State(proto.Enum): - r"""The application state, corresponding to - YarnProtos.YarnApplicationStateProto. - """ - STATE_UNSPECIFIED = 0 - NEW = 1 - NEW_SAVING = 2 - SUBMITTED = 3 - ACCEPTED = 4 - RUNNING = 5 - FINISHED = 6 - FAILED = 7 - KILLED = 8 - - name = proto.Field( - proto.STRING, - number=1, - ) - state = proto.Field( - proto.ENUM, - number=2, - enum=State, - ) - progress = proto.Field( - proto.FLOAT, - number=3, - ) - tracking_url = proto.Field( - proto.STRING, - number=4, - ) - - -class Job(proto.Message): - r"""A Dataproc job resource. - Attributes: - reference (google.cloud.dataproc_v1beta2.types.JobReference): - Optional. The fully qualified reference to the job, which - can be used to obtain the equivalent REST path of the job - resource. If this property is not specified when a job is - created, the server generates a job_id. - placement (google.cloud.dataproc_v1beta2.types.JobPlacement): - Required. Job information, including how, - when, and where to run the job. - hadoop_job (google.cloud.dataproc_v1beta2.types.HadoopJob): - Optional. Job is a Hadoop job. - spark_job (google.cloud.dataproc_v1beta2.types.SparkJob): - Optional. Job is a Spark job. - pyspark_job (google.cloud.dataproc_v1beta2.types.PySparkJob): - Optional. Job is a PySpark job. 
- hive_job (google.cloud.dataproc_v1beta2.types.HiveJob): - Optional. Job is a Hive job. - pig_job (google.cloud.dataproc_v1beta2.types.PigJob): - Optional. Job is a Pig job. - spark_r_job (google.cloud.dataproc_v1beta2.types.SparkRJob): - Optional. Job is a SparkR job. - spark_sql_job (google.cloud.dataproc_v1beta2.types.SparkSqlJob): - Optional. Job is a SparkSql job. - presto_job (google.cloud.dataproc_v1beta2.types.PrestoJob): - Optional. Job is a Presto job. - status (google.cloud.dataproc_v1beta2.types.JobStatus): - Output only. The job status. Additional application-specific - status information may be contained in the type_job and - yarn_applications fields. - status_history (Sequence[google.cloud.dataproc_v1beta2.types.JobStatus]): - Output only. The previous job status. - yarn_applications (Sequence[google.cloud.dataproc_v1beta2.types.YarnApplication]): - Output only. The collection of YARN applications spun up by - this job. - - **Beta** Feature: This report is available for testing - purposes only. It may be changed before final release. - submitted_by (str): - Output only. The email address of the user - submitting the job. For jobs submitted on the - cluster, the address is - username@hostname. - driver_output_resource_uri (str): - Output only. A URI pointing to the location - of the stdout of the job's driver program. - driver_control_files_uri (str): - Output only. If present, the location of miscellaneous - control files which may be used as part of job setup and - handling. If not present, control files may be placed in the - same location as ``driver_output_uri``. - labels (Sequence[google.cloud.dataproc_v1beta2.types.Job.LabelsEntry]): - Optional. The labels to associate with this job. Label - **keys** must contain 1 to 63 characters, and must conform - to `RFC 1035 `__. - Label **values** may be empty, but, if present, must contain - 1 to 63 characters, and must conform to `RFC - 1035 `__. No more than - 32 labels can be associated with a job. - scheduling (google.cloud.dataproc_v1beta2.types.JobScheduling): - Optional. Job scheduling configuration. - job_uuid (str): - Output only. A UUID that uniquely identifies a job within - the project over time. This is in contrast to a - user-settable reference.job_id that may be reused over time. - done (bool): - Output only. Indicates whether the job is completed. If the - value is ``false``, the job is still in progress. If - ``true``, the job is completed, and ``status.state`` field - will indicate if it was successful, failed, or cancelled. 
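For orientation, a sketch of assembling a ``Job`` from the pieces documented above and checking its status after submission; the ``JobControllerClient`` surface and the ``request``-dict calling convention follow the generated code in this change, and all identifiers are placeholders::

    from google.cloud import dataproc_v1beta2

    job = dataproc_v1beta2.Job(
        placement=dataproc_v1beta2.JobPlacement(cluster_name="my-cluster"),
        pyspark_job=dataproc_v1beta2.PySparkJob(
            main_python_file_uri="gs://my-bucket/word_count.py",
        ),
        labels={"env": "staging"},
    )

    client = dataproc_v1beta2.JobControllerClient()
    submitted = client.submit_job(
        request={"project_id": "my-project", "region": "us-central1", "job": job}
    )
    if submitted.status.state == dataproc_v1beta2.JobStatus.State.ERROR:
        print(submitted.status.details)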
- """ - - reference = proto.Field( - proto.MESSAGE, - number=1, - message='JobReference', - ) - placement = proto.Field( - proto.MESSAGE, - number=2, - message='JobPlacement', - ) - hadoop_job = proto.Field( - proto.MESSAGE, - number=3, - oneof='type_job', - message='HadoopJob', - ) - spark_job = proto.Field( - proto.MESSAGE, - number=4, - oneof='type_job', - message='SparkJob', - ) - pyspark_job = proto.Field( - proto.MESSAGE, - number=5, - oneof='type_job', - message='PySparkJob', - ) - hive_job = proto.Field( - proto.MESSAGE, - number=6, - oneof='type_job', - message='HiveJob', - ) - pig_job = proto.Field( - proto.MESSAGE, - number=7, - oneof='type_job', - message='PigJob', - ) - spark_r_job = proto.Field( - proto.MESSAGE, - number=21, - oneof='type_job', - message='SparkRJob', - ) - spark_sql_job = proto.Field( - proto.MESSAGE, - number=12, - oneof='type_job', - message='SparkSqlJob', - ) - presto_job = proto.Field( - proto.MESSAGE, - number=23, - oneof='type_job', - message='PrestoJob', - ) - status = proto.Field( - proto.MESSAGE, - number=8, - message='JobStatus', - ) - status_history = proto.RepeatedField( - proto.MESSAGE, - number=13, - message='JobStatus', - ) - yarn_applications = proto.RepeatedField( - proto.MESSAGE, - number=9, - message='YarnApplication', - ) - submitted_by = proto.Field( - proto.STRING, - number=10, - ) - driver_output_resource_uri = proto.Field( - proto.STRING, - number=17, - ) - driver_control_files_uri = proto.Field( - proto.STRING, - number=15, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=18, - ) - scheduling = proto.Field( - proto.MESSAGE, - number=20, - message='JobScheduling', - ) - job_uuid = proto.Field( - proto.STRING, - number=22, - ) - done = proto.Field( - proto.BOOL, - number=24, - ) - - -class JobScheduling(proto.Message): - r"""Job scheduling options. - Attributes: - max_failures_per_hour (int): - Optional. Maximum number of times per hour a - driver may be restarted as a result of driver - terminating with non-zero code before job is - reported failed. - - A job may be reported as thrashing if driver - exits with non-zero code 4 times within 10 - minute window. - - Maximum value is 10. - """ - - max_failures_per_hour = proto.Field( - proto.INT32, - number=1, - ) - - -class JobMetadata(proto.Message): - r"""Job Operation metadata. - Attributes: - job_id (str): - Output only. The job id. - status (google.cloud.dataproc_v1beta2.types.JobStatus): - Output only. Most recent job status. - operation_type (str): - Output only. Operation type. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Job submission time. - """ - - job_id = proto.Field( - proto.STRING, - number=1, - ) - status = proto.Field( - proto.MESSAGE, - number=2, - message='JobStatus', - ) - operation_type = proto.Field( - proto.STRING, - number=3, - ) - start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class SubmitJobRequest(proto.Message): - r"""A request to submit a job. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - job (google.cloud.dataproc_v1beta2.types.Job): - Required. The job resource. - request_id (str): - Optional. A unique id used to identify the request. 
If the - server receives two - [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] - requests with the same id, then the second request will be - ignored and the first - [Job][google.cloud.dataproc.v1beta2.Job] created and stored - in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - job = proto.Field( - proto.MESSAGE, - number=2, - message='Job', - ) - request_id = proto.Field( - proto.STRING, - number=4, - ) - - -class GetJobRequest(proto.Message): - r"""A request to get the resource representation for a job in a - project. - - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - job_id (str): - Required. The job ID. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - job_id = proto.Field( - proto.STRING, - number=2, - ) - - -class ListJobsRequest(proto.Message): - r"""A request to list jobs in a project. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - page_size (int): - Optional. The number of results to return in - each response. - page_token (str): - Optional. The page token, returned by a - previous call, to request the next page of - results. - cluster_name (str): - Optional. If set, the returned jobs list - includes only jobs that were submitted to the - named cluster. - job_state_matcher (google.cloud.dataproc_v1beta2.types.ListJobsRequest.JobStateMatcher): - Optional. Specifies enumerated categories of jobs to list. - (default = match ALL jobs). - - If ``filter`` is provided, ``jobStateMatcher`` will be - ignored. - filter (str): - Optional. A filter constraining the jobs to list. Filters - are case-sensitive and have the following syntax: - - [field = value] AND [field [= value]] ... - - where **field** is ``status.state`` or ``labels.[KEY]``, and - ``[KEY]`` is a label key. **value** can be ``*`` to match - all values. ``status.state`` can be either ``ACTIVE`` or - ``NON_ACTIVE``. Only the logical ``AND`` operator is - supported; space-separated items are treated as having an - implicit ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND labels.env = staging AND - labels.starred = \* - """ - class JobStateMatcher(proto.Enum): - r"""A matcher that specifies categories of job states.""" - ALL = 0 - ACTIVE = 1 - NON_ACTIVE = 2 - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=6, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - cluster_name = proto.Field( - proto.STRING, - number=4, - ) - job_state_matcher = proto.Field( - proto.ENUM, - number=5, - enum=JobStateMatcher, - ) - filter = proto.Field( - proto.STRING, - number=7, - ) - - -class UpdateJobRequest(proto.Message): - r"""A request to update a job. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. 
The Dataproc region in which to - handle the request. - job_id (str): - Required. The job ID. - job (google.cloud.dataproc_v1beta2.types.Job): - Required. The changes to the job. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Specifies the path, relative to Job, of the field - to update. For example, to update the labels of a Job the - update_mask parameter would be specified as labels, and the - ``PATCH`` request body would specify the new value. Note: - Currently, labels is the only field that can be updated. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=2, - ) - job_id = proto.Field( - proto.STRING, - number=3, - ) - job = proto.Field( - proto.MESSAGE, - number=4, - message='Job', - ) - update_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListJobsResponse(proto.Message): - r"""A list of jobs in a project. - Attributes: - jobs (Sequence[google.cloud.dataproc_v1beta2.types.Job]): - Output only. Jobs list. - next_page_token (str): - Optional. This token is included in the response if there - are more results to fetch. To fetch additional results, - provide this value as the ``page_token`` in a subsequent - ListJobsRequest. - """ - - @property - def raw_page(self): - return self - - jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Job', - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class CancelJobRequest(proto.Message): - r"""A request to cancel a job. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - job_id (str): - Required. The job ID. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - job_id = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteJobRequest(proto.Message): - r"""A request to delete a job. - Attributes: - project_id (str): - Required. The ID of the Google Cloud Platform - project that the job belongs to. - region (str): - Required. The Dataproc region in which to - handle the request. - job_id (str): - Required. The job ID. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - region = proto.Field( - proto.STRING, - number=3, - ) - job_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/operations.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/operations.py deleted file mode 100644 index 3d2d2471..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/operations.py +++ /dev/null @@ -1,133 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
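Putting the request messages above together, a rough sketch of listing jobs with the documented filter syntax and then patching only their labels (per UpdateJobRequest, ``labels`` is currently the only updatable field, hence the single-path mask; project, region, and label values are placeholders)::

    from google.cloud import dataproc_v1beta2
    from google.protobuf import field_mask_pb2

    client = dataproc_v1beta2.JobControllerClient()

    # Filter syntax from ListJobsRequest: ACTIVE jobs labelled env=staging.
    for job in client.list_jobs(request={
        "project_id": "my-project",
        "region": "us-central1",
        "filter": "status.state = ACTIVE AND labels.env = staging",
    }):
        client.update_job(request={
            "project_id": "my-project",
            "region": "us-central1",
            "job_id": job.reference.job_id,
            "job": dataproc_v1beta2.Job(labels={"env": "staging", "reviewed": "yes"}),
            "update_mask": field_mask_pb2.FieldMask(paths=["labels"]),
        })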
-# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1beta2', - manifest={ - 'ClusterOperationStatus', - 'ClusterOperationMetadata', - }, -) - - -class ClusterOperationStatus(proto.Message): - r"""The status of the operation. - Attributes: - state (google.cloud.dataproc_v1beta2.types.ClusterOperationStatus.State): - Output only. A message containing the - operation state. - inner_state (str): - Output only. A message containing the - detailed operation state. - details (str): - Output only. A message containing any - operation metadata details. - state_start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time this state was entered. - """ - class State(proto.Enum): - r"""The operation state.""" - UNKNOWN = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - - state = proto.Field( - proto.ENUM, - number=1, - enum=State, - ) - inner_state = proto.Field( - proto.STRING, - number=2, - ) - details = proto.Field( - proto.STRING, - number=3, - ) - state_start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class ClusterOperationMetadata(proto.Message): - r"""Metadata describing the operation. - Attributes: - cluster_name (str): - Output only. Name of the cluster for the - operation. - cluster_uuid (str): - Output only. Cluster UUID for the operation. - status (google.cloud.dataproc_v1beta2.types.ClusterOperationStatus): - Output only. Current operation status. - status_history (Sequence[google.cloud.dataproc_v1beta2.types.ClusterOperationStatus]): - Output only. The previous operation status. - operation_type (str): - Output only. The operation type. - description (str): - Output only. Short description of operation. - labels (Sequence[google.cloud.dataproc_v1beta2.types.ClusterOperationMetadata.LabelsEntry]): - Output only. Labels associated with the - operation - warnings (Sequence[str]): - Output only. Errors encountered during - operation execution. - """ - - cluster_name = proto.Field( - proto.STRING, - number=7, - ) - cluster_uuid = proto.Field( - proto.STRING, - number=8, - ) - status = proto.Field( - proto.MESSAGE, - number=9, - message='ClusterOperationStatus', - ) - status_history = proto.RepeatedField( - proto.MESSAGE, - number=10, - message='ClusterOperationStatus', - ) - operation_type = proto.Field( - proto.STRING, - number=11, - ) - description = proto.Field( - proto.STRING, - number=12, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=13, - ) - warnings = proto.RepeatedField( - proto.STRING, - number=14, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/shared.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/shared.py deleted file mode 100644 index 2a9671ba..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/shared.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
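The two messages above surface as long-running-operation metadata on the cluster RPCs; a sketch of reading them, assuming the standard ``google.api_core`` operation wrapper used by the generated clients (the cluster body is deliberately incomplete and all names are placeholders)::

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.ClusterControllerClient()
    operation = client.create_cluster(request={
        "project_id": "my-project",
        "region": "us-central1",
        "cluster": dataproc_v1beta2.Cluster(
            project_id="my-project",
            cluster_name="my-cluster",
            # config omitted for brevity; a real request needs a ClusterConfig
        ),
    })

    metadata = operation.metadata          # ClusterOperationMetadata
    print(metadata.operation_type, metadata.status.state)
    for warning in metadata.warnings:
        print("warning:", warning)

    cluster = operation.result()           # blocks until the operation is DONE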
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1beta2', - manifest={ - 'Component', - }, -) - - -class Component(proto.Enum): - r"""Cluster components that can be activated.""" - COMPONENT_UNSPECIFIED = 0 - ANACONDA = 5 - DRUID = 9 - HBASE = 11 - HIVE_WEBHCAT = 3 - JUPYTER = 1 - KERBEROS = 7 - PRESTO = 6 - RANGER = 12 - SOLR = 10 - ZEPPELIN = 4 - ZOOKEEPER = 8 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/workflow_templates.py b/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/workflow_templates.py deleted file mode 100644 index b159829e..00000000 --- a/owl-bot-staging/v1beta2/google/cloud/dataproc_v1beta2/types/workflow_templates.py +++ /dev/null @@ -1,1073 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.dataproc_v1beta2.types import clusters -from google.cloud.dataproc_v1beta2.types import jobs as gcd_jobs -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.dataproc.v1beta2', - manifest={ - 'WorkflowTemplate', - 'WorkflowTemplatePlacement', - 'ManagedCluster', - 'ClusterSelector', - 'OrderedJob', - 'TemplateParameter', - 'ParameterValidation', - 'RegexValidation', - 'ValueValidation', - 'WorkflowMetadata', - 'ClusterOperation', - 'WorkflowGraph', - 'WorkflowNode', - 'CreateWorkflowTemplateRequest', - 'GetWorkflowTemplateRequest', - 'InstantiateWorkflowTemplateRequest', - 'InstantiateInlineWorkflowTemplateRequest', - 'UpdateWorkflowTemplateRequest', - 'ListWorkflowTemplatesRequest', - 'ListWorkflowTemplatesResponse', - 'DeleteWorkflowTemplateRequest', - }, -) - - -class WorkflowTemplate(proto.Message): - r"""A Dataproc workflow template resource. - Attributes: - id (str): - Required. The template id. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). Cannot begin or end with - underscore or hyphen. Must consist of between 3 and 50 - characters. - - . - name (str): - Output only. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates``, the resource - name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): - Optional. Used to perform a consistent read-modify-write. - - This field should be left blank for a - ``CreateWorkflowTemplate`` request. 
It is required for an - ``UpdateWorkflowTemplate`` request, and must match the - current server version. A typical update template flow would - fetch the current template with a ``GetWorkflowTemplate`` - request, which will return the current template with the - ``version`` field filled in with the current server version. - The user updates other fields in the template, then returns - it as part of the ``UpdateWorkflowTemplate`` request. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time template was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time template was last - updated. - labels (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowTemplate.LabelsEntry]): - Optional. The labels to associate with this template. These - labels will be propagated to all jobs and clusters created - by the workflow instance. - - Label **keys** must contain 1 to 63 characters, and must - conform to `RFC - 1035 `__. - - Label **values** may be empty, but, if present, must contain - 1 to 63 characters, and must conform to `RFC - 1035 `__. - - No more than 32 labels can be associated with a template. - placement (google.cloud.dataproc_v1beta2.types.WorkflowTemplatePlacement): - Required. WorkflowTemplate scheduling - information. - jobs (Sequence[google.cloud.dataproc_v1beta2.types.OrderedJob]): - Required. The Directed Acyclic Graph of Jobs - to submit. - parameters (Sequence[google.cloud.dataproc_v1beta2.types.TemplateParameter]): - Optional. Template parameters whose values - are substituted into the template. Values for - parameters must be provided when the template is - instantiated. - dag_timeout (google.protobuf.duration_pb2.Duration): - Optional. Timeout duration for the DAG of jobs. You can use - "s", "m", "h", and "d" suffixes for second, minute, hour, - and day duration values, respectively. The timeout duration - must be from 10 minutes ("10m") to 24 hours ("24h" or "1d"). - The timer begins when the first job is submitted. If the - workflow is running at the end of the timeout period, any - remaining jobs are cancelled, the workflow is terminated, - and if the workflow was running on a `managed - cluster `__, - the cluster is deleted. - """ - - id = proto.Field( - proto.STRING, - number=2, - ) - name = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.INT32, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - placement = proto.Field( - proto.MESSAGE, - number=7, - message='WorkflowTemplatePlacement', - ) - jobs = proto.RepeatedField( - proto.MESSAGE, - number=8, - message='OrderedJob', - ) - parameters = proto.RepeatedField( - proto.MESSAGE, - number=9, - message='TemplateParameter', - ) - dag_timeout = proto.Field( - proto.MESSAGE, - number=10, - message=duration_pb2.Duration, - ) - - -class WorkflowTemplatePlacement(proto.Message): - r"""Specifies workflow execution target. - - Either ``managed_cluster`` or ``cluster_selector`` is required. - - Attributes: - managed_cluster (google.cloud.dataproc_v1beta2.types.ManagedCluster): - Optional. A cluster that is managed by the - workflow. - cluster_selector (google.cloud.dataproc_v1beta2.types.ClusterSelector): - Optional. A selector that chooses target - cluster for jobs based on metadata. 
- - The selector is evaluated at the time each job - is submitted. - """ - - managed_cluster = proto.Field( - proto.MESSAGE, - number=1, - oneof='placement', - message='ManagedCluster', - ) - cluster_selector = proto.Field( - proto.MESSAGE, - number=2, - oneof='placement', - message='ClusterSelector', - ) - - -class ManagedCluster(proto.Message): - r"""Cluster that is managed by the workflow. - Attributes: - cluster_name (str): - Required. The cluster name prefix. A unique - cluster name will be formed by appending a - random suffix. - The name must contain only lower-case letters - (a-z), numbers (0-9), and hyphens (-). Must - begin with a letter. Cannot begin or end with - hyphen. Must consist of between 2 and 35 - characters. - config (google.cloud.dataproc_v1beta2.types.ClusterConfig): - Required. The cluster configuration. - labels (Sequence[google.cloud.dataproc_v1beta2.types.ManagedCluster.LabelsEntry]): - Optional. The labels to associate with this cluster. - - Label keys must be between 1 and 63 characters long, and - must conform to the following PCRE regular expression: - [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - - Label values must be between 1 and 63 characters long, and - must conform to the following PCRE regular expression: - [\p{Ll}\p{Lo}\p{N}_-]{0,63} - - No more than 32 labels can be associated with a given - cluster. - """ - - cluster_name = proto.Field( - proto.STRING, - number=2, - ) - config = proto.Field( - proto.MESSAGE, - number=3, - message=clusters.ClusterConfig, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - - -class ClusterSelector(proto.Message): - r"""A selector that chooses target cluster for jobs based on - metadata. - - Attributes: - zone (str): - Optional. The zone where workflow process - executes. This parameter does not affect the - selection of the cluster. - If unspecified, the zone of the first cluster - matching the selector is used. - cluster_labels (Sequence[google.cloud.dataproc_v1beta2.types.ClusterSelector.ClusterLabelsEntry]): - Required. The cluster labels. Cluster must - have all labels to match. - """ - - zone = proto.Field( - proto.STRING, - number=1, - ) - cluster_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - - -class OrderedJob(proto.Message): - r"""A job executed by the workflow. - Attributes: - step_id (str): - Required. The step id. The id must be unique among all jobs - within the template. - - The step id is used as prefix for job id, as job - ``goog-dataproc-workflow-step-id`` label, and in - [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] - field from other steps. - - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). Cannot begin or end with - underscore or hyphen. Must consist of between 3 and 50 - characters. - hadoop_job (google.cloud.dataproc_v1beta2.types.HadoopJob): - Optional. Job is a Hadoop job. - spark_job (google.cloud.dataproc_v1beta2.types.SparkJob): - Optional. Job is a Spark job. - pyspark_job (google.cloud.dataproc_v1beta2.types.PySparkJob): - Optional. Job is a PySpark job. - hive_job (google.cloud.dataproc_v1beta2.types.HiveJob): - Optional. Job is a Hive job. - pig_job (google.cloud.dataproc_v1beta2.types.PigJob): - Optional. Job is a Pig job. - spark_r_job (google.cloud.dataproc_v1beta2.types.SparkRJob): - Optional. Job is a SparkR job. - spark_sql_job (google.cloud.dataproc_v1beta2.types.SparkSqlJob): - Optional. Job is a SparkSql job. 
- presto_job (google.cloud.dataproc_v1beta2.types.PrestoJob): - Optional. Job is a Presto job. - labels (Sequence[google.cloud.dataproc_v1beta2.types.OrderedJob.LabelsEntry]): - Optional. The labels to associate with this job. - - Label keys must be between 1 and 63 characters long, and - must conform to the following regular expression: - [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - - Label values must be between 1 and 63 characters long, and - must conform to the following regular expression: - [\p{Ll}\p{Lo}\p{N}_-]{0,63} - - No more than 32 labels can be associated with a given job. - scheduling (google.cloud.dataproc_v1beta2.types.JobScheduling): - Optional. Job scheduling configuration. - prerequisite_step_ids (Sequence[str]): - Optional. The optional list of prerequisite job step_ids. If - not specified, the job will start at the beginning of - workflow. - """ - - step_id = proto.Field( - proto.STRING, - number=1, - ) - hadoop_job = proto.Field( - proto.MESSAGE, - number=2, - oneof='job_type', - message=gcd_jobs.HadoopJob, - ) - spark_job = proto.Field( - proto.MESSAGE, - number=3, - oneof='job_type', - message=gcd_jobs.SparkJob, - ) - pyspark_job = proto.Field( - proto.MESSAGE, - number=4, - oneof='job_type', - message=gcd_jobs.PySparkJob, - ) - hive_job = proto.Field( - proto.MESSAGE, - number=5, - oneof='job_type', - message=gcd_jobs.HiveJob, - ) - pig_job = proto.Field( - proto.MESSAGE, - number=6, - oneof='job_type', - message=gcd_jobs.PigJob, - ) - spark_r_job = proto.Field( - proto.MESSAGE, - number=11, - oneof='job_type', - message=gcd_jobs.SparkRJob, - ) - spark_sql_job = proto.Field( - proto.MESSAGE, - number=7, - oneof='job_type', - message=gcd_jobs.SparkSqlJob, - ) - presto_job = proto.Field( - proto.MESSAGE, - number=12, - oneof='job_type', - message=gcd_jobs.PrestoJob, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=8, - ) - scheduling = proto.Field( - proto.MESSAGE, - number=9, - message=gcd_jobs.JobScheduling, - ) - prerequisite_step_ids = proto.RepeatedField( - proto.STRING, - number=10, - ) - - -class TemplateParameter(proto.Message): - r"""A configurable parameter that replaces one or more fields in - the template. Parameterizable fields: - - Labels - - File uris - - Job properties - - Job arguments - - Script variables - - Main class (in HadoopJob and SparkJob) - - Zone (in ClusterSelector) - - Attributes: - name (str): - Required. Parameter name. The parameter name is used as the - key, and paired with the parameter value, which are passed - to the template when the template is instantiated. The name - must contain only capital letters (A-Z), numbers (0-9), and - underscores (_), and must not start with a number. The - maximum length is 40 characters. - fields (Sequence[str]): - Required. Paths to all fields that the parameter replaces. A - field is allowed to appear in at most one parameter's list - of field paths. - - A field path is similar in syntax to a - [google.protobuf.FieldMask][google.protobuf.FieldMask]. For - example, a field path that references the zone field of a - workflow template's cluster selector would be specified as - ``placement.clusterSelector.zone``. 
- - Also, field paths can reference fields using the following - syntax: - - - Values in maps can be referenced by key: - - - labels['key'] - - placement.clusterSelector.clusterLabels['key'] - - placement.managedCluster.labels['key'] - - placement.clusterSelector.clusterLabels['key'] - - jobs['step-id'].labels['key'] - - - Jobs in the jobs list can be referenced by step-id: - - - jobs['step-id'].hadoopJob.mainJarFileUri - - jobs['step-id'].hiveJob.queryFileUri - - jobs['step-id'].pySparkJob.mainPythonFileUri - - jobs['step-id'].hadoopJob.jarFileUris[0] - - jobs['step-id'].hadoopJob.archiveUris[0] - - jobs['step-id'].hadoopJob.fileUris[0] - - jobs['step-id'].pySparkJob.pythonFileUris[0] - - - Items in repeated fields can be referenced by a - zero-based index: - - - jobs['step-id'].sparkJob.args[0] - - - Other examples: - - - jobs['step-id'].hadoopJob.properties['key'] - - jobs['step-id'].hadoopJob.args[0] - - jobs['step-id'].hiveJob.scriptVariables['key'] - - jobs['step-id'].hadoopJob.mainJarFileUri - - placement.clusterSelector.zone - - It may not be possible to parameterize maps and repeated - fields in their entirety since only individual map values - and individual items in repeated fields can be referenced. - For example, the following field paths are invalid: - - - placement.clusterSelector.clusterLabels - - jobs['step-id'].sparkJob.args - description (str): - Optional. Brief description of the parameter. - Must not exceed 1024 characters. - validation (google.cloud.dataproc_v1beta2.types.ParameterValidation): - Optional. Validation rules to be applied to - this parameter's value. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - fields = proto.RepeatedField( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - validation = proto.Field( - proto.MESSAGE, - number=4, - message='ParameterValidation', - ) - - -class ParameterValidation(proto.Message): - r"""Configuration for parameter validation. - Attributes: - regex (google.cloud.dataproc_v1beta2.types.RegexValidation): - Validation based on regular expressions. - values (google.cloud.dataproc_v1beta2.types.ValueValidation): - Validation based on a list of allowed values. - """ - - regex = proto.Field( - proto.MESSAGE, - number=1, - oneof='validation_type', - message='RegexValidation', - ) - values = proto.Field( - proto.MESSAGE, - number=2, - oneof='validation_type', - message='ValueValidation', - ) - - -class RegexValidation(proto.Message): - r"""Validation based on regular expressions. - Attributes: - regexes (Sequence[str]): - Required. RE2 regular expressions used to - validate the parameter's value. The value must - match the regex in its entirety (substring - matches are not sufficient). - """ - - regexes = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class ValueValidation(proto.Message): - r"""Validation based on a list of allowed values. - Attributes: - values (Sequence[str]): - Required. List of allowed values for the - parameter. - """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class WorkflowMetadata(proto.Message): - r"""A Dataproc workflow template resource. - Attributes: - template (str): - Output only. The resource name of the workflow template as - described in - https://cloud.google.com/apis/design/resource_names. 
- - - For ``projects.regions.workflowTemplates``, the resource - name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): - Output only. The version of template at the - time of workflow instantiation. - create_cluster (google.cloud.dataproc_v1beta2.types.ClusterOperation): - Output only. The create cluster operation - metadata. - graph (google.cloud.dataproc_v1beta2.types.WorkflowGraph): - Output only. The workflow graph. - delete_cluster (google.cloud.dataproc_v1beta2.types.ClusterOperation): - Output only. The delete cluster operation - metadata. - state (google.cloud.dataproc_v1beta2.types.WorkflowMetadata.State): - Output only. The workflow state. - cluster_name (str): - Output only. The name of the target cluster. - parameters (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowMetadata.ParametersEntry]): - Map from parameter names to values that were - used for those parameters. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Workflow start time. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Workflow end time. - cluster_uuid (str): - Output only. The UUID of target cluster. - dag_timeout (google.protobuf.duration_pb2.Duration): - Output only. The timeout duration for the DAG of jobs. - Minimum timeout duration is 10 minutes and maximum is 24 - hours, expressed as a - [google.protobuf.Duration][https://developers.google.com/protocol-buffers/docs/proto3#json_mapping]. - For example, "1800" = 1800 seconds/30 minutes duration. - dag_start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. DAG start time, which is only set for workflows - with - [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout] - when the DAG begins. - dag_end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. DAG end time, which is only set for workflows - with - [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout] - when the DAG ends. 
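Tying the template messages above together, an illustrative (not authoritative) two-step workflow template that targets an existing cluster by label and exposes one regex-validated parameter; every URI, label, and id is a placeholder::

    from google.cloud import dataproc_v1beta2

    template = dataproc_v1beta2.WorkflowTemplate(
        id="nightly-etl",
        placement=dataproc_v1beta2.WorkflowTemplatePlacement(
            cluster_selector=dataproc_v1beta2.ClusterSelector(
                cluster_labels={"env": "prod"},
            ),
        ),
        jobs=[
            dataproc_v1beta2.OrderedJob(
                step_id="extract",
                hadoop_job=dataproc_v1beta2.HadoopJob(
                    main_jar_file_uri="gs://my-bucket/extract.jar",
                ),
            ),
            dataproc_v1beta2.OrderedJob(
                step_id="load",
                hive_job=dataproc_v1beta2.HiveJob(query_file_uri="gs://my-bucket/load.hql"),
                prerequisite_step_ids=["extract"],   # run only after "extract" succeeds
            ),
        ],
        parameters=[
            dataproc_v1beta2.TemplateParameter(
                name="ZONE",
                fields=["placement.clusterSelector.zone"],
                validation=dataproc_v1beta2.ParameterValidation(
                    regex=dataproc_v1beta2.RegexValidation(regexes=[r"us-.*"]),
                ),
            ),
        ],
    )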
- """ - class State(proto.Enum): - r"""The operation state.""" - UNKNOWN = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - - template = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.INT32, - number=2, - ) - create_cluster = proto.Field( - proto.MESSAGE, - number=3, - message='ClusterOperation', - ) - graph = proto.Field( - proto.MESSAGE, - number=4, - message='WorkflowGraph', - ) - delete_cluster = proto.Field( - proto.MESSAGE, - number=5, - message='ClusterOperation', - ) - state = proto.Field( - proto.ENUM, - number=6, - enum=State, - ) - cluster_name = proto.Field( - proto.STRING, - number=7, - ) - parameters = proto.MapField( - proto.STRING, - proto.STRING, - number=8, - ) - start_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - cluster_uuid = proto.Field( - proto.STRING, - number=11, - ) - dag_timeout = proto.Field( - proto.MESSAGE, - number=12, - message=duration_pb2.Duration, - ) - dag_start_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - dag_end_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - - -class ClusterOperation(proto.Message): - r"""The cluster operation triggered by a workflow. - Attributes: - operation_id (str): - Output only. The id of the cluster operation. - error (str): - Output only. Error, if operation failed. - done (bool): - Output only. Indicates the operation is done. - """ - - operation_id = proto.Field( - proto.STRING, - number=1, - ) - error = proto.Field( - proto.STRING, - number=2, - ) - done = proto.Field( - proto.BOOL, - number=3, - ) - - -class WorkflowGraph(proto.Message): - r"""The workflow graph. - Attributes: - nodes (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowNode]): - Output only. The workflow nodes. - """ - - nodes = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='WorkflowNode', - ) - - -class WorkflowNode(proto.Message): - r"""The workflow node. - Attributes: - step_id (str): - Output only. The name of the node. - prerequisite_step_ids (Sequence[str]): - Output only. Node's prerequisite nodes. - job_id (str): - Output only. The job id; populated after the - node enters RUNNING state. - state (google.cloud.dataproc_v1beta2.types.WorkflowNode.NodeState): - Output only. The node state. - error (str): - Output only. The error detail. - """ - class NodeState(proto.Enum): - r"""The workflow node state.""" - NODE_STATUS_UNSPECIFIED = 0 - BLOCKED = 1 - RUNNABLE = 2 - RUNNING = 3 - COMPLETED = 4 - FAILED = 5 - - step_id = proto.Field( - proto.STRING, - number=1, - ) - prerequisite_step_ids = proto.RepeatedField( - proto.STRING, - number=2, - ) - job_id = proto.Field( - proto.STRING, - number=3, - ) - state = proto.Field( - proto.ENUM, - number=5, - enum=NodeState, - ) - error = proto.Field( - proto.STRING, - number=6, - ) - - -class CreateWorkflowTemplateRequest(proto.Message): - r"""A request to create a workflow template. - Attributes: - parent (str): - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. 
- - - For ``projects.regions.workflowTemplates,create``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.create``, the - resource name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): - Required. The Dataproc workflow template to - create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - template = proto.Field( - proto.MESSAGE, - number=2, - message='WorkflowTemplate', - ) - - -class GetWorkflowTemplateRequest(proto.Message): - r"""A request to fetch a workflow template. - Attributes: - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.get``, the - resource name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.get``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): - Optional. The version of workflow template to - retrieve. Only previously instantiated versions - can be retrieved. - If unspecified, retrieves the current version. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.INT32, - number=2, - ) - - -class InstantiateWorkflowTemplateRequest(proto.Message): - r"""A request to instantiate a workflow template. - Attributes: - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): - Optional. The version of workflow template to - instantiate. If specified, the workflow will be - instantiated only if the current version of the - workflow template has the supplied version. - This option cannot be used to instantiate a - previous version of workflow template. - instance_id (str): - Deprecated. Please use ``request_id`` field instead. - request_id (str): - Optional. A tag that prevents multiple concurrent workflow - instances with the same tag from running. This mitigates - risk of concurrent instances started due to retries. - - It is recommended to always set this value to a - `UUID `__. - - The tag must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - parameters (Sequence[google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): - Optional. Map from parameter names to values - that should be used for those parameters. Values - may not exceed 100 characters. 
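A sketch of instantiating a stored template with the request messages above; the resource-name format follows the docstrings in this file, the UUID ``request_id`` is the recommended idempotency tag, and the template id and parameter value are placeholders::

    import uuid

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.WorkflowTemplateServiceClient()
    name = "projects/my-project/regions/us-central1/workflowTemplates/nightly-etl"

    operation = client.instantiate_workflow_template(request={
        "name": name,
        "request_id": str(uuid.uuid4()),        # de-duplicates retried requests
        "parameters": {"ZONE": "us-central1-a"},
    })
    operation.result()  # the in-flight WorkflowMetadata is exposed as operation.metadata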
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.INT32, - number=2, - ) - instance_id = proto.Field( - proto.STRING, - number=3, - ) - request_id = proto.Field( - proto.STRING, - number=5, - ) - parameters = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - - -class InstantiateInlineWorkflowTemplateRequest(proto.Message): - r"""A request to instantiate an inline workflow template. - Attributes: - parent (str): - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For - ``projects.regions.workflowTemplates,instantiateinline``, - the resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For - ``projects.locations.workflowTemplates.instantiateinline``, - the resource name of the location has the following - format: ``projects/{project_id}/locations/{location}`` - template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): - Required. The workflow template to - instantiate. - instance_id (str): - Deprecated. Please use ``request_id`` field instead. - request_id (str): - Optional. A tag that prevents multiple concurrent workflow - instances with the same tag from running. This mitigates - risk of concurrent instances started due to retries. - - It is recommended to always set this value to a - `UUID `__. - - The tag must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - template = proto.Field( - proto.MESSAGE, - number=2, - message='WorkflowTemplate', - ) - instance_id = proto.Field( - proto.STRING, - number=3, - ) - request_id = proto.Field( - proto.STRING, - number=4, - ) - - -class UpdateWorkflowTemplateRequest(proto.Message): - r"""A request to update a workflow template. - Attributes: - template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): - Required. The updated workflow template. - - The ``template.version`` field must match the current - version. - """ - - template = proto.Field( - proto.MESSAGE, - number=1, - message='WorkflowTemplate', - ) - - -class ListWorkflowTemplatesRequest(proto.Message): - r"""A request to list workflow templates in a project. - Attributes: - parent (str): - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,list``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.list``, the - resource name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - page_size (int): - Optional. The maximum number of results to - return in each response. - page_token (str): - Optional. The page token, returned by a - previous call, to request the next page of - results. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - - -class ListWorkflowTemplatesResponse(proto.Message): - r"""A response to a request to list workflow templates in a - project. - - Attributes: - templates (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): - Output only. WorkflowTemplates list. - next_page_token (str): - Output only. 
This token is included in the response if there - are more results to fetch. To fetch additional results, - provide this value as the page_token in a subsequent - ListWorkflowTemplatesRequest. - """ - - @property - def raw_page(self): - return self - - templates = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='WorkflowTemplate', - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteWorkflowTemplateRequest(proto.Message): - r"""A request to delete a workflow template. - Currently started workflows will remain running. - - Attributes: - name (str): - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.delete``, the - resource name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.instantiate``, - the resource name of the template has the following - format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): - Optional. The version of workflow template to - delete. If specified, will only delete the - template if the current server version matches - specified version. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta2/mypy.ini b/owl-bot-staging/v1beta2/mypy.ini deleted file mode 100644 index 4505b485..00000000 --- a/owl-bot-staging/v1beta2/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1beta2/noxfile.py b/owl-bot-staging/v1beta2/noxfile.py deleted file mode 100644 index 95c232be..00000000 --- a/owl-bot-staging/v1beta2/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
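For completeness, the list and delete requests above in use; the generated pager flattens ``ListWorkflowTemplatesResponse`` pages, and passing ``version`` makes the delete conditional on the current server version (the cleanup criterion here is hypothetical)::

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.WorkflowTemplateServiceClient()
    parent = "projects/my-project/regions/us-central1"

    for tmpl in client.list_workflow_templates(request={"parent": parent, "page_size": 50}):
        if tmpl.id.startswith("tmp-"):
            client.delete_workflow_template(
                request={"name": tmpl.name, "version": tmpl.version}
            )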
-# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/dataproc_v1beta2/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.7') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. - """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1beta2/scripts/fixup_dataproc_v1beta2_keywords.py b/owl-bot-staging/v1beta2/scripts/fixup_dataproc_v1beta2_keywords.py deleted file mode 100644 index b9e52549..00000000 --- a/owl-bot-staging/v1beta2/scripts/fixup_dataproc_v1beta2_keywords.py +++ /dev/null @@ -1,200 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class dataprocCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'cancel_job': ('project_id', 'region', 'job_id', ), - 'create_autoscaling_policy': ('parent', 'policy', ), - 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', ), - 'create_workflow_template': ('parent', 'template', ), - 'delete_autoscaling_policy': ('name', ), - 'delete_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), - 'delete_job': ('project_id', 'region', 'job_id', ), - 'delete_workflow_template': ('name', 'version', ), - 'diagnose_cluster': ('project_id', 'region', 'cluster_name', ), - 'get_autoscaling_policy': ('name', ), - 'get_cluster': ('project_id', 'region', 'cluster_name', ), - 'get_job': ('project_id', 'region', 'job_id', ), - 'get_workflow_template': ('name', 'version', ), - 'instantiate_inline_workflow_template': ('parent', 'template', 'instance_id', 'request_id', ), - 'instantiate_workflow_template': ('name', 'version', 'instance_id', 'request_id', 'parameters', ), - 'list_autoscaling_policies': ('parent', 'page_size', 'page_token', ), - 'list_clusters': ('project_id', 'region', 'filter', 'page_size', 'page_token', ), - 'list_jobs': ('project_id', 'region', 'page_size', 'page_token', 'cluster_name', 'job_state_matcher', 'filter', ), - 'list_workflow_templates': ('parent', 'page_size', 'page_token', ), - 'submit_job': ('project_id', 'region', 'job', 'request_id', ), - 'submit_job_as_operation': ('project_id', 'region', 'job', 'request_id', ), - 'update_autoscaling_policy': ('policy', ), - 'update_cluster': ('project_id', 'region', 'cluster_name', 'cluster', 'update_mask', 'graceful_decommission_timeout', 'request_id', ), - 'update_job': ('project_id', 'region', 'job_id', 'job', 'update_mask', ), - 'update_workflow_template': ('template', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=dataprocCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the dataproc client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
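To make the conversion described above concrete, an illustrative before/after sketch for one method listed in METHOD_TO_PARAMS ('get_autoscaling_policy': ('name', )); the resource name is a placeholder, not a value from this patch, and the exact output depends on the transformer:

    # Before fixup: the old flattened/positional calling convention.
    client.get_autoscaling_policy(
        "projects/my-project/regions/us-central1/autoscalingPolicies/my-policy"
    )

    # After fixup: the same call rewritten to pass a single "request" dict,
    # while control parameters (retry, timeout, metadata) stay as keywords.
    client.get_autoscaling_policy(request={
        'name': "projects/my-project/regions/us-central1/autoscalingPolicies/my-policy",
    })

As the early return in leave_Call above shows, calls that already pass a request keyword are left untouched, so re-running the script over fixed sources is a no-op.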
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta2/setup.py b/owl-bot-staging/v1beta2/setup.py deleted file mode 100644 index 63f4596f..00000000 --- a/owl-bot-staging/v1beta2/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-dataproc', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1beta2/tests/__init__.py b/owl-bot-staging/v1beta2/tests/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1beta2/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta2/tests/unit/__init__.py b/owl-bot-staging/v1beta2/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1beta2/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta2/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1beta2/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/__init__.py b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py deleted file mode 100644 index 435a442e..00000000 --- a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py +++ /dev/null @@ -1,2293 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import AutoscalingPolicyServiceAsyncClient -from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import AutoscalingPolicyServiceClient -from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import pagers -from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import transports -from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.dataproc_v1beta2.types import autoscaling_policies -from google.oauth2 import service_account -from google.protobuf import duration_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(None) is None - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - AutoscalingPolicyServiceClient, - AutoscalingPolicyServiceAsyncClient, -]) -def test_autoscaling_policy_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - AutoscalingPolicyServiceClient, - AutoscalingPolicyServiceAsyncClient, -]) -def test_autoscaling_policy_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), - (transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_autoscaling_policy_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - AutoscalingPolicyServiceClient, - AutoscalingPolicyServiceAsyncClient, -]) -def test_autoscaling_policy_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def 
test_autoscaling_policy_service_client_get_transport_class(): - transport = AutoscalingPolicyServiceClient.get_transport_class() - available_transports = [ - transports.AutoscalingPolicyServiceGrpcTransport, - ] - assert transport in available_transports - - transport = AutoscalingPolicyServiceClient.get_transport_class("grpc") - assert transport == transports.AutoscalingPolicyServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), - (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(AutoscalingPolicyServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceClient)) -@mock.patch.object(AutoscalingPolicyServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceAsyncClient)) -def test_autoscaling_policy_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(AutoscalingPolicyServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(AutoscalingPolicyServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc", "true"), - (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc", "false"), - (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(AutoscalingPolicyServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceClient)) -@mock.patch.object(AutoscalingPolicyServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalingPolicyServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_autoscaling_policy_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), - (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_autoscaling_policy_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (AutoscalingPolicyServiceClient, transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), - (AutoscalingPolicyServiceAsyncClient, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_autoscaling_policy_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_autoscaling_policy_service_client_client_options_from_dict(): - with mock.patch('google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = AutoscalingPolicyServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.CreateAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), - ) - response = client.create_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -def test_create_autoscaling_policy_from_dict(): - test_create_autoscaling_policy(request_type=dict) - - -def test_create_autoscaling_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - client.create_autoscaling_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() - - -@pytest.mark.asyncio -async def test_create_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.CreateAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - )) - response = await client.create_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -@pytest.mark.asyncio -async def test_create_autoscaling_policy_async_from_dict(): - await test_create_autoscaling_policy_async(request_type=dict) - - -def test_create_autoscaling_policy_field_headers(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.CreateAutoscalingPolicyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - call.return_value = autoscaling_policies.AutoscalingPolicy() - client.create_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_autoscaling_policy_field_headers_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.CreateAutoscalingPolicyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - await client.create_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_autoscaling_policy_flattened(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_autoscaling_policy( - parent='parent_value', - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') - - -def test_create_autoscaling_policy_flattened_error(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_autoscaling_policy( - autoscaling_policies.CreateAutoscalingPolicyRequest(), - parent='parent_value', - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_create_autoscaling_policy_flattened_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_autoscaling_policy( - parent='parent_value', - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') - - -@pytest.mark.asyncio -async def test_create_autoscaling_policy_flattened_error_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_autoscaling_policy( - autoscaling_policies.CreateAutoscalingPolicyRequest(), - parent='parent_value', - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - -def test_update_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.UpdateAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), - ) - response = client.update_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -def test_update_autoscaling_policy_from_dict(): - test_update_autoscaling_policy(request_type=dict) - - -def test_update_autoscaling_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - client.update_autoscaling_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() - - -@pytest.mark.asyncio -async def test_update_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.UpdateAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - )) - response = await client.update_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -@pytest.mark.asyncio -async def test_update_autoscaling_policy_async_from_dict(): - await test_update_autoscaling_policy_async(request_type=dict) - - -def test_update_autoscaling_policy_field_headers(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.UpdateAutoscalingPolicyRequest() - - request.policy.name = 'policy.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - call.return_value = autoscaling_policies.AutoscalingPolicy() - client.update_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'policy.name=policy.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_autoscaling_policy_field_headers_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.UpdateAutoscalingPolicyRequest() - - request.policy.name = 'policy.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - await client.update_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'policy.name=policy.name/value', - ) in kw['metadata'] - - -def test_update_autoscaling_policy_flattened(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_autoscaling_policy( - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') - - -def test_update_autoscaling_policy_flattened_error(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_autoscaling_policy( - autoscaling_policies.UpdateAutoscalingPolicyRequest(), - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_update_autoscaling_policy_flattened_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_autoscaling_policy( - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id='id_value') - - -@pytest.mark.asyncio -async def test_update_autoscaling_policy_flattened_error_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_autoscaling_policy( - autoscaling_policies.UpdateAutoscalingPolicyRequest(), - policy=autoscaling_policies.AutoscalingPolicy(id='id_value'), - ) - - -def test_get_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.GetAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(graceful_decommission_timeout=duration_pb2.Duration(seconds=751))), - ) - response = client.get_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -def test_get_autoscaling_policy_from_dict(): - test_get_autoscaling_policy(request_type=dict) - - -def test_get_autoscaling_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - client.get_autoscaling_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() - - -@pytest.mark.asyncio -async def test_get_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.GetAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy( - id='id_value', - name='name_value', - )) - response = await client.get_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, autoscaling_policies.AutoscalingPolicy) - assert response.id == 'id_value' - assert response.name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_autoscaling_policy_async_from_dict(): - await test_get_autoscaling_policy_async(request_type=dict) - - -def test_get_autoscaling_policy_field_headers(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.GetAutoscalingPolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - call.return_value = autoscaling_policies.AutoscalingPolicy() - client.get_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_autoscaling_policy_field_headers_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.GetAutoscalingPolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - await client.get_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_autoscaling_policy_flattened(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_autoscaling_policy( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_autoscaling_policy_flattened_error(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_autoscaling_policy( - autoscaling_policies.GetAutoscalingPolicyRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_autoscaling_policy_flattened_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.AutoscalingPolicy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.AutoscalingPolicy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_autoscaling_policy( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_autoscaling_policy_flattened_error_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_autoscaling_policy( - autoscaling_policies.GetAutoscalingPolicyRequest(), - name='name_value', - ) - - -def test_list_autoscaling_policies(transport: str = 'grpc', request_type=autoscaling_policies.ListAutoscalingPoliciesRequest): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_autoscaling_policies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAutoscalingPoliciesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_autoscaling_policies_from_dict(): - test_list_autoscaling_policies(request_type=dict) - - -def test_list_autoscaling_policies_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - client.list_autoscaling_policies() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() - - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.ListAutoscalingPoliciesRequest): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_autoscaling_policies(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAutoscalingPoliciesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_async_from_dict(): - await test_list_autoscaling_policies_async(request_type=dict) - - -def test_list_autoscaling_policies_field_headers(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.ListAutoscalingPoliciesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() - client.list_autoscaling_policies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_field_headers_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.ListAutoscalingPoliciesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse()) - await client.list_autoscaling_policies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_autoscaling_policies_flattened(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_autoscaling_policies( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_autoscaling_policies_flattened_error(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_autoscaling_policies( - autoscaling_policies.ListAutoscalingPoliciesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_flattened_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(autoscaling_policies.ListAutoscalingPoliciesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_autoscaling_policies( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_flattened_error_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_autoscaling_policies( - autoscaling_policies.ListAutoscalingPoliciesRequest(), - parent='parent_value', - ) - - -def test_list_autoscaling_policies_pager(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='abc', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[], - next_page_token='def', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='ghi', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_autoscaling_policies(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, autoscaling_policies.AutoscalingPolicy) - for i in results) - -def test_list_autoscaling_policies_pages(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='abc', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[], - next_page_token='def', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='ghi', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - ), - RuntimeError, - ) - pages = list(client.list_autoscaling_policies(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_async_pager(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='abc', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[], - next_page_token='def', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='ghi', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_autoscaling_policies(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, autoscaling_policies.AutoscalingPolicy) - for i in responses) - -@pytest.mark.asyncio -async def test_list_autoscaling_policies_async_pages(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_autoscaling_policies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='abc', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[], - next_page_token='def', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - ], - next_page_token='ghi', - ), - autoscaling_policies.ListAutoscalingPoliciesResponse( - policies=[ - autoscaling_policies.AutoscalingPolicy(), - autoscaling_policies.AutoscalingPolicy(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_autoscaling_policies(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_autoscaling_policy(transport: str = 'grpc', request_type=autoscaling_policies.DeleteAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_delete_autoscaling_policy_from_dict(): - test_delete_autoscaling_policy(request_type=dict) - - -def test_delete_autoscaling_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - client.delete_autoscaling_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() - - -@pytest.mark.asyncio -async def test_delete_autoscaling_policy_async(transport: str = 'grpc_asyncio', request_type=autoscaling_policies.DeleteAutoscalingPolicyRequest): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_autoscaling_policy_async_from_dict(): - await test_delete_autoscaling_policy_async(request_type=dict) - - -def test_delete_autoscaling_policy_field_headers(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.DeleteAutoscalingPolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - call.return_value = None - client.delete_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_autoscaling_policy_field_headers_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = autoscaling_policies.DeleteAutoscalingPolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_autoscaling_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_autoscaling_policy_flattened(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_autoscaling_policy( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_autoscaling_policy_flattened_error(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_autoscaling_policy( - autoscaling_policies.DeleteAutoscalingPolicyRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_autoscaling_policy_flattened_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_autoscaling_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_autoscaling_policy( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_autoscaling_policy_flattened_error_async(): - client = AutoscalingPolicyServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_autoscaling_policy( - autoscaling_policies.DeleteAutoscalingPolicyRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.AutoscalingPolicyServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. 
- transport = transports.AutoscalingPolicyServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoscalingPolicyServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.AutoscalingPolicyServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoscalingPolicyServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.AutoscalingPolicyServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = AutoscalingPolicyServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.AutoscalingPolicyServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.AutoscalingPolicyServiceGrpcTransport, - transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.AutoscalingPolicyServiceGrpcTransport, - ) - -def test_autoscaling_policy_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.AutoscalingPolicyServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_autoscaling_policy_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.AutoscalingPolicyServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_autoscaling_policy', - 'update_autoscaling_policy', - 'get_autoscaling_policy', - 'list_autoscaling_policies', - 'delete_autoscaling_policy', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - -@requires_google_auth_gte_1_25_0 -def test_autoscaling_policy_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.AutoscalingPolicyServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_autoscaling_policy_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.AutoscalingPolicyServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_autoscaling_policy_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.AutoscalingPolicyServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_autoscaling_policy_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - AutoscalingPolicyServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_autoscaling_policy_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - AutoscalingPolicyServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.AutoscalingPolicyServiceGrpcTransport, - transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_autoscaling_policy_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.AutoscalingPolicyServiceGrpcTransport, - transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_autoscaling_policy_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.AutoscalingPolicyServiceGrpcTransport, grpc_helpers), - (transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_autoscaling_policy_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "dataproc.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="dataproc.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) -def test_autoscaling_policy_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_autoscaling_policy_service_host_no_port(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), - ) - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_autoscaling_policy_service_host_with_port(): - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), - ) - assert client.transport._host == 'dataproc.googleapis.com:8000' - -def test_autoscaling_policy_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.AutoscalingPolicyServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_autoscaling_policy_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) -def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.AutoscalingPolicyServiceGrpcTransport, transports.AutoscalingPolicyServiceGrpcAsyncIOTransport]) -def test_autoscaling_policy_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_autoscaling_policy_path(): - project = "squid" - location = "clam" - autoscaling_policy = "whelk" - expected = "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format(project=project, location=location, autoscaling_policy=autoscaling_policy, ) - actual = AutoscalingPolicyServiceClient.autoscaling_policy_path(project, location, autoscaling_policy) - assert expected == actual - - -def test_parse_autoscaling_policy_path(): - expected = { - "project": "octopus", - "location": "oyster", - "autoscaling_policy": "nudibranch", - } - path = AutoscalingPolicyServiceClient.autoscaling_policy_path(**expected) - - # Check that the 
path construction is reversible. - actual = AutoscalingPolicyServiceClient.parse_autoscaling_policy_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = AutoscalingPolicyServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = AutoscalingPolicyServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = AutoscalingPolicyServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = AutoscalingPolicyServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = AutoscalingPolicyServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = AutoscalingPolicyServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = AutoscalingPolicyServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = AutoscalingPolicyServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = AutoscalingPolicyServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = AutoscalingPolicyServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = AutoscalingPolicyServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = AutoscalingPolicyServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = AutoscalingPolicyServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = AutoscalingPolicyServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = AutoscalingPolicyServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.AutoscalingPolicyServiceTransport, '_prep_wrapped_messages') as prep: - client = AutoscalingPolicyServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.AutoscalingPolicyServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = AutoscalingPolicyServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py deleted file mode 100644 index d41f9004..00000000 --- a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py +++ /dev/null @@ -1,2258 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.dataproc_v1beta2.services.cluster_controller import ClusterControllerAsyncClient -from google.cloud.dataproc_v1beta2.services.cluster_controller import ClusterControllerClient -from google.cloud.dataproc_v1beta2.services.cluster_controller import pagers -from google.cloud.dataproc_v1beta2.services.cluster_controller import transports -from google.cloud.dataproc_v1beta2.services.cluster_controller.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.dataproc_v1beta2.types import clusters -from google.cloud.dataproc_v1beta2.types import operations -from google.cloud.dataproc_v1beta2.types import shared -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert ClusterControllerClient._get_default_mtls_endpoint(None) is None - assert ClusterControllerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert ClusterControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert ClusterControllerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert ClusterControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert ClusterControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - ClusterControllerClient, - ClusterControllerAsyncClient, -]) -def test_cluster_controller_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - ClusterControllerClient, - ClusterControllerAsyncClient, -]) -def test_cluster_controller_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.ClusterControllerGrpcTransport, "grpc"), - (transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_cluster_controller_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - ClusterControllerClient, - ClusterControllerAsyncClient, -]) -def test_cluster_controller_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_cluster_controller_client_get_transport_class(): - transport = ClusterControllerClient.get_transport_class() - available_transports = [ - 
transports.ClusterControllerGrpcTransport, - ] - assert transport in available_transports - - transport = ClusterControllerClient.get_transport_class("grpc") - assert transport == transports.ClusterControllerGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), - (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(ClusterControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerClient)) -@mock.patch.object(ClusterControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerAsyncClient)) -def test_cluster_controller_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(ClusterControllerClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(ClusterControllerClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc", "true"), - (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc", "false"), - (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(ClusterControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerClient)) -@mock.patch.object(ClusterControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterControllerAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_cluster_controller_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), - (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_cluster_controller_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), - (ClusterControllerAsyncClient, transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_cluster_controller_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_cluster_controller_client_client_options_from_dict(): - with mock.patch('google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = ClusterControllerClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_cluster(transport: str = 'grpc', request_type=clusters.CreateClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.CreateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_cluster_from_dict(): - test_create_cluster(request_type=dict) - - -def test_create_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - client.create_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.CreateClusterRequest() - - -@pytest.mark.asyncio -async def test_create_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.CreateClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.CreateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_cluster_async_from_dict(): - await test_create_cluster_async(request_type=dict) - - -def test_create_cluster_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_cluster( - project_id='project_id_value', - region='region_value', - cluster=clusters.Cluster(project_id='project_id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster == clusters.Cluster(project_id='project_id_value') - - -def test_create_cluster_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_cluster( - clusters.CreateClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster=clusters.Cluster(project_id='project_id_value'), - ) - - -@pytest.mark.asyncio -async def test_create_cluster_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_cluster( - project_id='project_id_value', - region='region_value', - cluster=clusters.Cluster(project_id='project_id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster == clusters.Cluster(project_id='project_id_value') - - -@pytest.mark.asyncio -async def test_create_cluster_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_cluster( - clusters.CreateClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster=clusters.Cluster(project_id='project_id_value'), - ) - - -def test_update_cluster(transport: str = 'grpc', request_type=clusters.UpdateClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.UpdateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_cluster_from_dict(): - test_update_cluster(request_type=dict) - - -def test_update_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - client.update_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.UpdateClusterRequest() - - -@pytest.mark.asyncio -async def test_update_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.UpdateClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.UpdateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_cluster_async_from_dict(): - await test_update_cluster_async(request_type=dict) - - -def test_update_cluster_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - cluster=clusters.Cluster(project_id='project_id_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - assert args[0].cluster == clusters.Cluster(project_id='project_id_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_cluster_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_cluster( - clusters.UpdateClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - cluster=clusters.Cluster(project_id='project_id_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_cluster_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - cluster=clusters.Cluster(project_id='project_id_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - assert args[0].cluster == clusters.Cluster(project_id='project_id_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -@pytest.mark.asyncio -async def test_update_cluster_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_cluster( - clusters.UpdateClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - cluster=clusters.Cluster(project_id='project_id_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_cluster(transport: str = 'grpc', request_type=clusters.DeleteClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DeleteClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_cluster_from_dict(): - test_delete_cluster(request_type=dict) - - -def test_delete_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - client.delete_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DeleteClusterRequest() - - -@pytest.mark.asyncio -async def test_delete_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.DeleteClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DeleteClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_cluster_async_from_dict(): - await test_delete_cluster_async(request_type=dict) - - -def test_delete_cluster_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -def test_delete_cluster_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_cluster( - clusters.DeleteClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_cluster_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -@pytest.mark.asyncio -async def test_delete_cluster_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_cluster( - clusters.DeleteClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -def test_get_cluster(transport: str = 'grpc', request_type=clusters.GetClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = clusters.Cluster( - project_id='project_id_value', - cluster_name='cluster_name_value', - cluster_uuid='cluster_uuid_value', - ) - response = client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.GetClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, clusters.Cluster) - assert response.project_id == 'project_id_value' - assert response.cluster_name == 'cluster_name_value' - assert response.cluster_uuid == 'cluster_uuid_value' - - -def test_get_cluster_from_dict(): - test_get_cluster(request_type=dict) - - -def test_get_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - client.get_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.GetClusterRequest() - - -@pytest.mark.asyncio -async def test_get_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.GetClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(clusters.Cluster( - project_id='project_id_value', - cluster_name='cluster_name_value', - cluster_uuid='cluster_uuid_value', - )) - response = await client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.GetClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, clusters.Cluster) - assert response.project_id == 'project_id_value' - assert response.cluster_name == 'cluster_name_value' - assert response.cluster_uuid == 'cluster_uuid_value' - - -@pytest.mark.asyncio -async def test_get_cluster_async_from_dict(): - await test_get_cluster_async(request_type=dict) - - -def test_get_cluster_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = clusters.Cluster() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -def test_get_cluster_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_cluster( - clusters.GetClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -@pytest.mark.asyncio -async def test_get_cluster_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = clusters.Cluster() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.Cluster()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -@pytest.mark.asyncio -async def test_get_cluster_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_cluster( - clusters.GetClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -def test_list_clusters(transport: str = 'grpc', request_type=clusters.ListClustersRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = clusters.ListClustersResponse( - next_page_token='next_page_token_value', - ) - response = client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.ListClustersRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListClustersPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_clusters_from_dict(): - test_list_clusters(request_type=dict) - - -def test_list_clusters_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - client.list_clusters() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.ListClustersRequest() - - -@pytest.mark.asyncio -async def test_list_clusters_async(transport: str = 'grpc_asyncio', request_type=clusters.ListClustersRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(clusters.ListClustersResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.ListClustersRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListClustersAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_clusters_async_from_dict(): - await test_list_clusters_async(request_type=dict) - - -def test_list_clusters_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = clusters.ListClustersResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_clusters( - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].filter == 'filter_value' - - -def test_list_clusters_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_clusters( - clusters.ListClustersRequest(), - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - -@pytest.mark.asyncio -async def test_list_clusters_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = clusters.ListClustersResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.ListClustersResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_clusters( - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].filter == 'filter_value' - - -@pytest.mark.asyncio -async def test_list_clusters_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_clusters( - clusters.ListClustersRequest(), - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - -def test_list_clusters_pager(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - clusters.Cluster(), - ], - next_page_token='abc', - ), - clusters.ListClustersResponse( - clusters=[], - next_page_token='def', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - ], - next_page_token='ghi', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - ], - ), - RuntimeError, - ) - - metadata = () - pager = client.list_clusters(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, clusters.Cluster) - for i in results) - -def test_list_clusters_pages(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - clusters.Cluster(), - ], - next_page_token='abc', - ), - clusters.ListClustersResponse( - clusters=[], - next_page_token='def', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - ], - next_page_token='ghi', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - ], - ), - RuntimeError, - ) - pages = list(client.list_clusters(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_clusters_async_pager(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - clusters.Cluster(), - ], - next_page_token='abc', - ), - clusters.ListClustersResponse( - clusters=[], - next_page_token='def', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - ], - next_page_token='ghi', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_clusters(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, clusters.Cluster) - for i in responses) - -@pytest.mark.asyncio -async def test_list_clusters_async_pages(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - clusters.Cluster(), - ], - next_page_token='abc', - ), - clusters.ListClustersResponse( - clusters=[], - next_page_token='def', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - ], - next_page_token='ghi', - ), - clusters.ListClustersResponse( - clusters=[ - clusters.Cluster(), - clusters.Cluster(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_clusters(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_diagnose_cluster(transport: str = 'grpc', request_type=clusters.DiagnoseClusterRequest): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.diagnose_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.diagnose_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DiagnoseClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_diagnose_cluster_from_dict(): - test_diagnose_cluster(request_type=dict) - - -def test_diagnose_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.diagnose_cluster), - '__call__') as call: - client.diagnose_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DiagnoseClusterRequest() - - -@pytest.mark.asyncio -async def test_diagnose_cluster_async(transport: str = 'grpc_asyncio', request_type=clusters.DiagnoseClusterRequest): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.diagnose_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.diagnose_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == clusters.DiagnoseClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_diagnose_cluster_async_from_dict(): - await test_diagnose_cluster_async(request_type=dict) - - -def test_diagnose_cluster_flattened(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.diagnose_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.diagnose_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -def test_diagnose_cluster_flattened_error(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.diagnose_cluster( - clusters.DiagnoseClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -@pytest.mark.asyncio -async def test_diagnose_cluster_flattened_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.diagnose_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.diagnose_cluster( - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].cluster_name == 'cluster_name_value' - - -@pytest.mark.asyncio -async def test_diagnose_cluster_flattened_error_async(): - client = ClusterControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.diagnose_cluster( - clusters.DiagnoseClusterRequest(), - project_id='project_id_value', - region='region_value', - cluster_name='cluster_name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.ClusterControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.ClusterControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ClusterControllerClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.ClusterControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ClusterControllerClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.ClusterControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = ClusterControllerClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.ClusterControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.ClusterControllerGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.ClusterControllerGrpcTransport, - transports.ClusterControllerGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.ClusterControllerGrpcTransport, - ) - -def test_cluster_controller_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.ClusterControllerTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_cluster_controller_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.ClusterControllerTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_cluster', - 'update_cluster', - 'delete_cluster', - 'get_cluster', - 'list_clusters', - 'diagnose_cluster', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_cluster_controller_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ClusterControllerTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_cluster_controller_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ClusterControllerTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_cluster_controller_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ClusterControllerTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_cluster_controller_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ClusterControllerClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_cluster_controller_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ClusterControllerClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ClusterControllerGrpcTransport, - transports.ClusterControllerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_cluster_controller_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ClusterControllerGrpcTransport, - transports.ClusterControllerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_cluster_controller_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.ClusterControllerGrpcTransport, grpc_helpers), - (transports.ClusterControllerGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_cluster_controller_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "dataproc.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="dataproc.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport]) -def test_cluster_controller_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_cluster_controller_host_no_port(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), - ) - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_cluster_controller_host_with_port(): - client = ClusterControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), - ) - assert client.transport._host == 'dataproc.googleapis.com:8000' - -def test_cluster_controller_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.ClusterControllerGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_cluster_controller_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
-    transport = transports.ClusterControllerGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials == None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport])
-def test_cluster_controller_transport_channel_mtls_with_client_cert_source(
-    transport_class
-):
-    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_ssl_cred = mock.Mock()
-            grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-
-            cred = ga_credentials.AnonymousCredentials()
-            with pytest.warns(DeprecationWarning):
-                with mock.patch.object(google.auth, 'default') as adc:
-                    adc.return_value = (cred, None)
-                    transport = transport_class(
-                        host="squid.clam.whelk",
-                        api_mtls_endpoint="mtls.squid.clam.whelk",
-                        client_cert_source=client_cert_source_callback,
-                    )
-                    adc.assert_called_once()
-
-            grpc_ssl_channel_cred.assert_called_once_with(
-                certificate_chain=b"cert bytes", private_key=b"key bytes"
-            )
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-            assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.ClusterControllerGrpcTransport, transports.ClusterControllerGrpcAsyncIOTransport])
-def test_cluster_controller_transport_channel_mtls_with_adc(
-    transport_class
-):
-    mock_ssl_cred = mock.Mock()
-    with mock.patch.multiple(
-        "google.auth.transport.grpc.SslCredentials",
-        __init__=mock.Mock(return_value=None),
-        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
-    ):
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-            mock_cred = mock.Mock()
-
-            with pytest.warns(DeprecationWarning):
-                transport = transport_class(
-                    host="squid.clam.whelk",
-                    credentials=mock_cred,
-                    api_mtls_endpoint="mtls.squid.clam.whelk",
-                    client_cert_source=None,
-                )
-
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=mock_cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_cluster_controller_grpc_lro_client():
-    client = ClusterControllerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-    transport = client.transport
-
-    # Ensure that we have a api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_cluster_controller_grpc_lro_async_client():
-    client = ClusterControllerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc_asyncio',
-    )
-    transport = client.transport
-
-    # Ensure that we have a api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsAsyncClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_cluster_path():
-    project = "squid"
-    location = "clam"
-    cluster = "whelk"
-    expected = "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, )
-    actual = ClusterControllerClient.cluster_path(project, location, cluster)
-    assert expected == actual
-
-
-def test_parse_cluster_path():
-    expected = {
-        "project": "octopus",
-        "location": "oyster",
-        "cluster": "nudibranch",
-    }
-    path = ClusterControllerClient.cluster_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = ClusterControllerClient.parse_cluster_path(path)
-    assert expected == actual
-
-def test_common_billing_account_path():
-    billing_account = "cuttlefish"
-    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-    actual = ClusterControllerClient.common_billing_account_path(billing_account)
-    assert expected == actual
-
-
-def test_parse_common_billing_account_path():
-    expected = {
-        "billing_account": "mussel",
-    }
-    path = ClusterControllerClient.common_billing_account_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = ClusterControllerClient.parse_common_billing_account_path(path)
-    assert expected == actual
-
-def test_common_folder_path():
-    folder = "winkle"
-    expected = "folders/{folder}".format(folder=folder, )
-    actual = ClusterControllerClient.common_folder_path(folder)
-    assert expected == actual
-
-
-def test_parse_common_folder_path():
-    expected = {
-        "folder": "nautilus",
-    }
-    path = ClusterControllerClient.common_folder_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = ClusterControllerClient.parse_common_folder_path(path)
-    assert expected == actual
-
-def test_common_organization_path():
-    organization = "scallop"
-    expected = "organizations/{organization}".format(organization=organization, )
-    actual = ClusterControllerClient.common_organization_path(organization)
-    assert expected == actual
-
-
-def test_parse_common_organization_path():
-    expected = {
-        "organization": "abalone",
-    }
-    path = ClusterControllerClient.common_organization_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = ClusterControllerClient.parse_common_organization_path(path)
-    assert expected == actual
-
-def test_common_project_path():
-    project = "squid"
-    expected = "projects/{project}".format(project=project, )
-    actual = ClusterControllerClient.common_project_path(project)
-    assert expected == actual
-
-
-def test_parse_common_project_path():
-    expected = {
-        "project": "clam",
-    }
-    path = ClusterControllerClient.common_project_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = ClusterControllerClient.parse_common_project_path(path)
-    assert expected == actual
-
-def test_common_location_path():
-    project = "whelk"
-    location = "octopus"
-    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
-    actual = ClusterControllerClient.common_location_path(project, location)
-    assert expected == actual
-
-
-def test_parse_common_location_path():
-    expected = {
-        "project": "oyster",
-        "location": "nudibranch",
-    }
-    path = ClusterControllerClient.common_location_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = ClusterControllerClient.parse_common_location_path(path)
-    assert expected == actual
-
-
-def test_client_withDEFAULT_CLIENT_INFO():
-    client_info = gapic_v1.client_info.ClientInfo()
-
-    with mock.patch.object(transports.ClusterControllerTransport, '_prep_wrapped_messages') as prep:
-        client = ClusterControllerClient(
-            credentials=ga_credentials.AnonymousCredentials(),
-            client_info=client_info,
-        )
-        prep.assert_called_once_with(client_info)
-
-    with mock.patch.object(transports.ClusterControllerTransport, '_prep_wrapped_messages') as prep:
-        transport_class = ClusterControllerClient.get_transport_class()
-        transport = transport_class(
-            credentials=ga_credentials.AnonymousCredentials(),
-            client_info=client_info,
-        )
-        prep.assert_called_once_with(client_info)
diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py
deleted file mode 100644
index 246d7dc4..00000000
--- a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py
+++ /dev/null
@@ -1,2371 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerAsyncClient -from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerClient -from google.cloud.dataproc_v1beta2.services.job_controller import pagers -from google.cloud.dataproc_v1beta2.services.job_controller import transports -from google.cloud.dataproc_v1beta2.services.job_controller.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.dataproc_v1beta2.types import jobs -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert JobControllerClient._get_default_mtls_endpoint(None) is None - assert JobControllerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert JobControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert JobControllerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert JobControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert JobControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - JobControllerClient, - JobControllerAsyncClient, -]) -def test_job_controller_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - JobControllerClient, - JobControllerAsyncClient, -]) -def test_job_controller_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.JobControllerGrpcTransport, "grpc"), - (transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_controller_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - JobControllerClient, - JobControllerAsyncClient, -]) -def test_job_controller_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_job_controller_client_get_transport_class(): - transport = JobControllerClient.get_transport_class() - available_transports = [ - transports.JobControllerGrpcTransport, - ] - assert transport in available_transports - - transport = 
JobControllerClient.get_transport_class("grpc") - assert transport == transports.JobControllerGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), - (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(JobControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerClient)) -@mock.patch.object(JobControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerAsyncClient)) -def test_job_controller_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(JobControllerClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(JobControllerClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (JobControllerClient, transports.JobControllerGrpcTransport, "grpc", "true"), - (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (JobControllerClient, transports.JobControllerGrpcTransport, "grpc", "false"), - (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(JobControllerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerClient)) -@mock.patch.object(JobControllerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobControllerAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_job_controller_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), - (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_controller_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), - (JobControllerAsyncClient, transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_controller_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_job_controller_client_client_options_from_dict(): - with mock.patch('google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = JobControllerClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_submit_job(transport: str = 'grpc', request_type=jobs.SubmitJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job( - submitted_by='submitted_by_value', - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), - ) - response = client.submit_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.submitted_by == 'submitted_by_value' - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -def test_submit_job_from_dict(): - test_submit_job(request_type=dict) - - -def test_submit_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.submit_job), - '__call__') as call: - client.submit_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - -@pytest.mark.asyncio -async def test_submit_job_async(transport: str = 'grpc_asyncio', request_type=jobs.SubmitJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( - submitted_by='submitted_by_value', - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - )) - response = await client.submit_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.submitted_by == 'submitted_by_value' - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -@pytest.mark.asyncio -async def test_submit_job_async_from_dict(): - await test_submit_job_async(request_type=dict) - - -def test_submit_job_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.submit_job( - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) - - -def test_submit_job_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.submit_job( - jobs.SubmitJobRequest(), - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - -@pytest.mark.asyncio -async def test_submit_job_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.submit_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.submit_job( - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) - - -@pytest.mark.asyncio -async def test_submit_job_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.submit_job( - jobs.SubmitJobRequest(), - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - -def test_submit_job_as_operation(transport: str = 'grpc', request_type=jobs.SubmitJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job_as_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.submit_job_as_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_submit_job_as_operation_from_dict(): - test_submit_job_as_operation(request_type=dict) - - -def test_submit_job_as_operation_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job_as_operation), - '__call__') as call: - client.submit_job_as_operation() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - -@pytest.mark.asyncio -async def test_submit_job_as_operation_async(transport: str = 'grpc_asyncio', request_type=jobs.SubmitJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job_as_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.submit_job_as_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.SubmitJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_submit_job_as_operation_async_from_dict(): - await test_submit_job_as_operation_async(request_type=dict) - - -def test_submit_job_as_operation_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job_as_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.submit_job_as_operation( - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) - - -def test_submit_job_as_operation_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.submit_job_as_operation( - jobs.SubmitJobRequest(), - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - -@pytest.mark.asyncio -async def test_submit_job_as_operation_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.submit_job_as_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.submit_job_as_operation( - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job == jobs.Job(reference=jobs.JobReference(project_id='project_id_value')) - - -@pytest.mark.asyncio -async def test_submit_job_as_operation_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.submit_job_as_operation( - jobs.SubmitJobRequest(), - project_id='project_id_value', - region='region_value', - job=jobs.Job(reference=jobs.JobReference(project_id='project_id_value')), - ) - - -def test_get_job(transport: str = 'grpc', request_type=jobs.GetJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job( - submitted_by='submitted_by_value', - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), - ) - response = client.get_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.GetJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.submitted_by == 'submitted_by_value' - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -def test_get_job_from_dict(): - test_get_job(request_type=dict) - - -def test_get_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_job), - '__call__') as call: - client.get_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.GetJobRequest() - - -@pytest.mark.asyncio -async def test_get_job_async(transport: str = 'grpc_asyncio', request_type=jobs.GetJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( - submitted_by='submitted_by_value', - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - )) - response = await client.get_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.GetJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.submitted_by == 'submitted_by_value' - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -@pytest.mark.asyncio -async def test_get_job_async_from_dict(): - await test_get_job_async(request_type=dict) - - -def test_get_job_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -def test_get_job_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_job( - jobs.GetJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -@pytest.mark.asyncio -async def test_get_job_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -@pytest.mark.asyncio -async def test_get_job_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_job( - jobs.GetJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -def test_list_jobs(transport: str = 'grpc', request_type=jobs.ListJobsRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.ListJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.ListJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_jobs_from_dict(): - test_list_jobs(request_type=dict) - - -def test_list_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - client.list_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.ListJobsRequest() - - -@pytest.mark.asyncio -async def test_list_jobs_async(transport: str = 'grpc_asyncio', request_type=jobs.ListJobsRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.ListJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.ListJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_jobs_async_from_dict(): - await test_list_jobs_async(request_type=dict) - - -def test_list_jobs_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = jobs.ListJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_jobs( - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].filter == 'filter_value' - - -def test_list_jobs_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_jobs( - jobs.ListJobsRequest(), - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - -@pytest.mark.asyncio -async def test_list_jobs_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.ListJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.ListJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_jobs( - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].filter == 'filter_value' - - -@pytest.mark.asyncio -async def test_list_jobs_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_jobs( - jobs.ListJobsRequest(), - project_id='project_id_value', - region='region_value', - filter='filter_value', - ) - - -def test_list_jobs_pager(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - jobs.Job(), - ], - next_page_token='abc', - ), - jobs.ListJobsResponse( - jobs=[], - next_page_token='def', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - ], - next_page_token='ghi', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - ], - ), - RuntimeError, - ) - - metadata = () - pager = client.list_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, jobs.Job) - for i in results) - -def test_list_jobs_pages(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - jobs.Job(), - ], - next_page_token='abc', - ), - jobs.ListJobsResponse( - jobs=[], - next_page_token='def', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - ], - next_page_token='ghi', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - ], - ), - RuntimeError, - ) - pages = list(client.list_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_jobs_async_pager(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - jobs.Job(), - ], - next_page_token='abc', - ), - jobs.ListJobsResponse( - jobs=[], - next_page_token='def', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - ], - next_page_token='ghi', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, jobs.Job) - for i in responses) - -@pytest.mark.asyncio -async def test_list_jobs_async_pages(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - jobs.Job(), - ], - next_page_token='abc', - ), - jobs.ListJobsResponse( - jobs=[], - next_page_token='def', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - ], - next_page_token='ghi', - ), - jobs.ListJobsResponse( - jobs=[ - jobs.Job(), - jobs.Job(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_job(transport: str = 'grpc', request_type=jobs.UpdateJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = jobs.Job( - submitted_by='submitted_by_value', - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), - ) - response = client.update_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.UpdateJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.submitted_by == 'submitted_by_value' - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -def test_update_job_from_dict(): - test_update_job(request_type=dict) - - -def test_update_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_job), - '__call__') as call: - client.update_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.UpdateJobRequest() - - -@pytest.mark.asyncio -async def test_update_job_async(transport: str = 'grpc_asyncio', request_type=jobs.UpdateJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( - submitted_by='submitted_by_value', - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - )) - response = await client.update_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.UpdateJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, jobs.Job) - assert response.submitted_by == 'submitted_by_value' - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -@pytest.mark.asyncio -async def test_update_job_async_from_dict(): - await test_update_job_async(request_type=dict) - - -def test_cancel_job(transport: str = 'grpc', request_type=jobs.CancelJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job( - submitted_by='submitted_by_value', - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - hadoop_job=jobs.HadoopJob(main_jar_file_uri='main_jar_file_uri_value'), - ) - response = client.cancel_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.CancelJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.submitted_by == 'submitted_by_value' - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -def test_cancel_job_from_dict(): - test_cancel_job(request_type=dict) - - -def test_cancel_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_job), - '__call__') as call: - client.cancel_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.CancelJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_job_async(transport: str = 'grpc_asyncio', request_type=jobs.CancelJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job( - submitted_by='submitted_by_value', - driver_output_resource_uri='driver_output_resource_uri_value', - driver_control_files_uri='driver_control_files_uri_value', - job_uuid='job_uuid_value', - done=True, - )) - response = await client.cancel_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.CancelJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, jobs.Job) - assert response.submitted_by == 'submitted_by_value' - assert response.driver_output_resource_uri == 'driver_output_resource_uri_value' - assert response.driver_control_files_uri == 'driver_control_files_uri_value' - assert response.job_uuid == 'job_uuid_value' - assert response.done is True - - -@pytest.mark.asyncio -async def test_cancel_job_async_from_dict(): - await test_cancel_job_async(request_type=dict) - - -def test_cancel_job_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -def test_cancel_job_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_job( - jobs.CancelJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_job_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = jobs.Job() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -@pytest.mark.asyncio -async def test_cancel_job_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_job( - jobs.CancelJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -def test_delete_job(transport: str = 'grpc', request_type=jobs.DeleteJobRequest): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.DeleteJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_job_from_dict(): - test_delete_job(request_type=dict) - - -def test_delete_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_job), - '__call__') as call: - client.delete_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.DeleteJobRequest() - - -@pytest.mark.asyncio -async def test_delete_job_async(transport: str = 'grpc_asyncio', request_type=jobs.DeleteJobRequest): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == jobs.DeleteJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_job_async_from_dict(): - await test_delete_job_async(request_type=dict) - - -def test_delete_job_flattened(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -def test_delete_job_flattened_error(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_job( - jobs.DeleteJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -@pytest.mark.asyncio -async def test_delete_job_flattened_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_job( - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].region == 'region_value' - assert args[0].job_id == 'job_id_value' - - -@pytest.mark.asyncio -async def test_delete_job_flattened_error_async(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_job( - jobs.DeleteJobRequest(), - project_id='project_id_value', - region='region_value', - job_id='job_id_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.JobControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.JobControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobControllerClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.JobControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobControllerClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = JobControllerClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobControllerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.JobControllerGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.JobControllerGrpcTransport, - transports.JobControllerGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.JobControllerGrpcTransport, - ) - -def test_job_controller_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.JobControllerTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_job_controller_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.JobControllerTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'submit_job', - 'submit_job_as_operation', - 'get_job', - 'list_jobs', - 'update_job', - 'cancel_job', - 'delete_job', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_job_controller_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobControllerTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_job_controller_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobControllerTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_job_controller_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobControllerTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_job_controller_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - JobControllerClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_job_controller_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - JobControllerClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.JobControllerGrpcTransport, - transports.JobControllerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_job_controller_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.JobControllerGrpcTransport, - transports.JobControllerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_job_controller_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.JobControllerGrpcTransport, grpc_helpers), - (transports.JobControllerGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_job_controller_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "dataproc.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="dataproc.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport]) -def test_job_controller_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_job_controller_host_no_port(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), - ) - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_job_controller_host_with_port(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), - ) - assert client.transport._host == 'dataproc.googleapis.com:8000' - -def test_job_controller_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.JobControllerGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_job_controller_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.JobControllerGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport]) -def test_job_controller_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.JobControllerGrpcTransport, transports.JobControllerGrpcAsyncIOTransport]) -def test_job_controller_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_job_controller_grpc_lro_client(): - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_job_controller_grpc_lro_async_client(): - client = JobControllerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = JobControllerClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = JobControllerClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = JobControllerClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = JobControllerClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = JobControllerClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = JobControllerClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = JobControllerClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = JobControllerClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = JobControllerClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = JobControllerClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = JobControllerClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = JobControllerClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = JobControllerClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = JobControllerClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobControllerClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.JobControllerTransport, '_prep_wrapped_messages') as prep: - client = JobControllerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.JobControllerTransport, '_prep_wrapped_messages') as prep: - transport_class = JobControllerClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py b/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py deleted file mode 100644 index 12783033..00000000 --- a/owl-bot-staging/v1beta2/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py +++ /dev/null @@ -1,2842 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.dataproc_v1beta2.services.workflow_template_service import WorkflowTemplateServiceAsyncClient -from google.cloud.dataproc_v1beta2.services.workflow_template_service import WorkflowTemplateServiceClient -from google.cloud.dataproc_v1beta2.services.workflow_template_service import pagers -from google.cloud.dataproc_v1beta2.services.workflow_template_service import transports -from google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.dataproc_v1beta2.types import clusters -from google.cloud.dataproc_v1beta2.types import jobs -from google.cloud.dataproc_v1beta2.types import shared -from google.cloud.dataproc_v1beta2.types import workflow_templates -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test 
cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(None) is None - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - WorkflowTemplateServiceClient, - WorkflowTemplateServiceAsyncClient, -]) -def test_workflow_template_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - WorkflowTemplateServiceClient, - WorkflowTemplateServiceAsyncClient, -]) -def test_workflow_template_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.WorkflowTemplateServiceGrpcTransport, "grpc"), - (transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_workflow_template_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - 
WorkflowTemplateServiceClient, - WorkflowTemplateServiceAsyncClient, -]) -def test_workflow_template_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_workflow_template_service_client_get_transport_class(): - transport = WorkflowTemplateServiceClient.get_transport_class() - available_transports = [ - transports.WorkflowTemplateServiceGrpcTransport, - ] - assert transport in available_transports - - transport = WorkflowTemplateServiceClient.get_transport_class("grpc") - assert transport == transports.WorkflowTemplateServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), - (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(WorkflowTemplateServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceClient)) -@mock.patch.object(WorkflowTemplateServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceAsyncClient)) -def test_workflow_template_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(WorkflowTemplateServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(WorkflowTemplateServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc", "true"), - (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc", "false"), - (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(WorkflowTemplateServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceClient)) -@mock.patch.object(WorkflowTemplateServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowTemplateServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_workflow_template_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), - (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_workflow_template_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (WorkflowTemplateServiceClient, transports.WorkflowTemplateServiceGrpcTransport, "grpc"), - (WorkflowTemplateServiceAsyncClient, transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_workflow_template_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_workflow_template_service_client_client_options_from_dict(): - with mock.patch('google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = WorkflowTemplateServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_workflow_template(transport: str = 'grpc', request_type=workflow_templates.CreateWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - ) - response = client.create_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -def test_create_workflow_template_from_dict(): - test_create_workflow_template(request_type=dict) - - -def test_create_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - client.create_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_create_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.CreateWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - )) - response = await client.create_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -@pytest.mark.asyncio -async def test_create_workflow_template_async_from_dict(): - await test_create_workflow_template_async(request_type=dict) - - -def test_create_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.CreateWorkflowTemplateRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - call.return_value = workflow_templates.WorkflowTemplate() - client.create_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.CreateWorkflowTemplateRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - await client.create_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_workflow_template( - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -def test_create_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_workflow_template( - workflow_templates.CreateWorkflowTemplateRequest(), - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_create_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_workflow_template( - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -@pytest.mark.asyncio -async def test_create_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_workflow_template( - workflow_templates.CreateWorkflowTemplateRequest(), - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -def test_get_workflow_template(transport: str = 'grpc', request_type=workflow_templates.GetWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - ) - response = client.get_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.GetWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -def test_get_workflow_template_from_dict(): - test_get_workflow_template(request_type=dict) - - -def test_get_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - client.get_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.GetWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_get_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.GetWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - )) - response = await client.get_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.GetWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -@pytest.mark.asyncio -async def test_get_workflow_template_async_from_dict(): - await test_get_workflow_template_async(request_type=dict) - - -def test_get_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.GetWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - call.return_value = workflow_templates.WorkflowTemplate() - client.get_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.GetWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - await client.get_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_workflow_template( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_workflow_template( - workflow_templates.GetWorkflowTemplateRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_workflow_template( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_workflow_template( - workflow_templates.GetWorkflowTemplateRequest(), - name='name_value', - ) - - -def test_instantiate_workflow_template(transport: str = 'grpc', request_type=workflow_templates.InstantiateWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.instantiate_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_instantiate_workflow_template_from_dict(): - test_instantiate_workflow_template(request_type=dict) - - -def test_instantiate_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - client.instantiate_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_instantiate_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.InstantiateWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.instantiate_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_instantiate_workflow_template_async_from_dict(): - await test_instantiate_workflow_template_async(request_type=dict) - - -def test_instantiate_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.InstantiateWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.instantiate_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_instantiate_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.InstantiateWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.instantiate_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. 
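# For reference, a sketch of how the ('x-goog-request-params', ...) pair that
# the field-header assertions below look for is normally produced; this assumes
# google-api-core's gapic_v1.routing_header helpers, which the generated client
# uses internally.
from google.api_core import gapic_v1

routing_metadata = gapic_v1.routing_header.to_grpc_metadata((('name', 'name/value'),))
# routing_metadata is ('x-goog-request-params', 'name=name/value'); the client
# appends this tuple to the outgoing call metadata, which is why the tests
# check membership in kw['metadata'].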
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_instantiate_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.instantiate_workflow_template( - name='name_value', - parameters={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - assert args[0].parameters == {'key_value': 'value_value'} - - -def test_instantiate_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.instantiate_workflow_template( - workflow_templates.InstantiateWorkflowTemplateRequest(), - name='name_value', - parameters={'key_value': 'value_value'}, - ) - - -@pytest.mark.asyncio -async def test_instantiate_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.instantiate_workflow_template( - name='name_value', - parameters={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - assert args[0].parameters == {'key_value': 'value_value'} - - -@pytest.mark.asyncio -async def test_instantiate_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.instantiate_workflow_template( - workflow_templates.InstantiateWorkflowTemplateRequest(), - name='name_value', - parameters={'key_value': 'value_value'}, - ) - - -def test_instantiate_inline_workflow_template(transport: str = 'grpc', request_type=workflow_templates.InstantiateInlineWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.instantiate_inline_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_instantiate_inline_workflow_template_from_dict(): - test_instantiate_inline_workflow_template(request_type=dict) - - -def test_instantiate_inline_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - client.instantiate_inline_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_instantiate_inline_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.InstantiateInlineWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.instantiate_inline_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_instantiate_inline_workflow_template_async_from_dict(): - await test_instantiate_inline_workflow_template_async(request_type=dict) - - -def test_instantiate_inline_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.instantiate_inline_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_instantiate_inline_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.instantiate_inline_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_instantiate_inline_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.instantiate_inline_workflow_template( - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
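# A small sketch of the proto-plus behaviour the assertions below rely on:
# messages compare by field value (so a freshly constructed WorkflowTemplate
# can be compared against args[0].template), and unset proto3 scalars read
# back as their zero values.
from google.cloud.dataproc_v1beta2.types import workflow_templates

a = workflow_templates.WorkflowTemplate(id='id_value')
b = workflow_templates.WorkflowTemplate(id='id_value')
assert a == b          # value equality, not identity
assert a is not b
assert b.version == 0  # unset int32 field defaults to zero
assert b.name == ''    # unset string field defaults to the empty string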
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -def test_instantiate_inline_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.instantiate_inline_workflow_template( - workflow_templates.InstantiateInlineWorkflowTemplateRequest(), - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_instantiate_inline_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.instantiate_inline_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.instantiate_inline_workflow_template( - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -@pytest.mark.asyncio -async def test_instantiate_inline_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.instantiate_inline_workflow_template( - workflow_templates.InstantiateInlineWorkflowTemplateRequest(), - parent='parent_value', - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -def test_update_workflow_template(transport: str = 'grpc', request_type=workflow_templates.UpdateWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - ) - response = client.update_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -def test_update_workflow_template_from_dict(): - test_update_workflow_template(request_type=dict) - - -def test_update_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - client.update_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_update_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.UpdateWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate( - id='id_value', - name='name_value', - version=774, - )) - response = await client.update_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, workflow_templates.WorkflowTemplate) - assert response.id == 'id_value' - assert response.name == 'name_value' - assert response.version == 774 - - -@pytest.mark.asyncio -async def test_update_workflow_template_async_from_dict(): - await test_update_workflow_template_async(request_type=dict) - - -def test_update_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.UpdateWorkflowTemplateRequest() - - request.template.name = 'template.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - call.return_value = workflow_templates.WorkflowTemplate() - client.update_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'template.name=template.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.UpdateWorkflowTemplateRequest() - - request.template.name = 'template.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - await client.update_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'template.name=template.name/value', - ) in kw['metadata'] - - -def test_update_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_workflow_template( - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -def test_update_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_workflow_template( - workflow_templates.UpdateWorkflowTemplateRequest(), - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_update_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.WorkflowTemplate() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.WorkflowTemplate()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_workflow_template( - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].template == workflow_templates.WorkflowTemplate(id='id_value') - - -@pytest.mark.asyncio -async def test_update_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_workflow_template( - workflow_templates.UpdateWorkflowTemplateRequest(), - template=workflow_templates.WorkflowTemplate(id='id_value'), - ) - - -def test_list_workflow_templates(transport: str = 'grpc', request_type=workflow_templates.ListWorkflowTemplatesRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.ListWorkflowTemplatesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_workflow_templates(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListWorkflowTemplatesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_workflow_templates_from_dict(): - test_list_workflow_templates(request_type=dict) - - -def test_list_workflow_templates_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - client.list_workflow_templates() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() - - -@pytest.mark.asyncio -async def test_list_workflow_templates_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.ListWorkflowTemplatesRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_workflow_templates(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListWorkflowTemplatesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_workflow_templates_async_from_dict(): - await test_list_workflow_templates_async(request_type=dict) - - -def test_list_workflow_templates_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.ListWorkflowTemplatesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - call.return_value = workflow_templates.ListWorkflowTemplatesResponse() - client.list_workflow_templates(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_workflow_templates_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.ListWorkflowTemplatesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse()) - await client.list_workflow_templates(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_workflow_templates_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.ListWorkflowTemplatesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_workflow_templates( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_workflow_templates_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_workflow_templates( - workflow_templates.ListWorkflowTemplatesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_workflow_templates_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = workflow_templates.ListWorkflowTemplatesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflow_templates.ListWorkflowTemplatesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_workflow_templates( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_workflow_templates_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_workflow_templates( - workflow_templates.ListWorkflowTemplatesRequest(), - parent='parent_value', - ) - - -def test_list_workflow_templates_pager(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Set the response to a series of pages. 
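# Note on the pager tests that follow: the pager re-invokes the wrapped RPC
# with page_token set to the previous response's next_page_token and stops as
# soon as that token is empty. The trailing RuntimeError in call.side_effect
# is therefore never reached: the last page's empty token ends iteration
# before the sentinel would be consumed.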
- call.side_effect = ( - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - next_page_token='abc', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[], - next_page_token='def', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - ], - next_page_token='ghi', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_workflow_templates(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, workflow_templates.WorkflowTemplate) - for i in results) - -def test_list_workflow_templates_pages(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - next_page_token='abc', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[], - next_page_token='def', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - ], - next_page_token='ghi', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - ), - RuntimeError, - ) - pages = list(client.list_workflow_templates(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_workflow_templates_async_pager(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - next_page_token='abc', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[], - next_page_token='def', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - ], - next_page_token='ghi', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_workflow_templates(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, workflow_templates.WorkflowTemplate) - for i in responses) - -@pytest.mark.asyncio -async def test_list_workflow_templates_async_pages(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_workflow_templates), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - next_page_token='abc', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[], - next_page_token='def', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - ], - next_page_token='ghi', - ), - workflow_templates.ListWorkflowTemplatesResponse( - templates=[ - workflow_templates.WorkflowTemplate(), - workflow_templates.WorkflowTemplate(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_workflow_templates(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_workflow_template(transport: str = 'grpc', request_type=workflow_templates.DeleteWorkflowTemplateRequest): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_workflow_template_from_dict(): - test_delete_workflow_template(request_type=dict) - - -def test_delete_workflow_template_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - client.delete_workflow_template() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() - - -@pytest.mark.asyncio -async def test_delete_workflow_template_async(transport: str = 'grpc_asyncio', request_type=workflow_templates.DeleteWorkflowTemplateRequest): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_workflow_template_async_from_dict(): - await test_delete_workflow_template_async(request_type=dict) - - -def test_delete_workflow_template_field_headers(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.DeleteWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - call.return_value = None - client.delete_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_workflow_template_field_headers_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = workflow_templates.DeleteWorkflowTemplateRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_workflow_template(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_workflow_template_flattened(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_workflow_template( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_workflow_template_flattened_error(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_workflow_template( - workflow_templates.DeleteWorkflowTemplateRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_workflow_template_flattened_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_workflow_template), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_workflow_template( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_workflow_template_flattened_error_async(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_workflow_template( - workflow_templates.DeleteWorkflowTemplateRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.WorkflowTemplateServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. 
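# Why the combinations below raise: a transport instance is already built with
# its own credentials and channel, so client-level credentials, a
# credentials_file, or scopes supplied via client_options can no longer be
# applied to it. Rather than silently ignore one of the two sources, the client
# constructor rejects the ambiguity with ValueError.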
- transport = transports.WorkflowTemplateServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = WorkflowTemplateServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.WorkflowTemplateServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = WorkflowTemplateServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.WorkflowTemplateServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = WorkflowTemplateServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.WorkflowTemplateServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.WorkflowTemplateServiceGrpcTransport, - transports.WorkflowTemplateServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.WorkflowTemplateServiceGrpcTransport, - ) - -def test_workflow_template_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.WorkflowTemplateServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_workflow_template_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.WorkflowTemplateServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_workflow_template', - 'get_workflow_template', - 'instantiate_workflow_template', - 'instantiate_inline_workflow_template', - 'update_workflow_template', - 'list_workflow_templates', - 'delete_workflow_template', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_workflow_template_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.WorkflowTemplateServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_workflow_template_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.WorkflowTemplateServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_workflow_template_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.WorkflowTemplateServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_workflow_template_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - WorkflowTemplateServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_workflow_template_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
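# Background for the paired gte/lt markers used around here: google-auth 1.25.0
# added the default_scopes argument to google.auth.default() and
# load_credentials_from_file(). With newer google-auth the client forwards
# scopes=None plus default_scopes=('https://www.googleapis.com/auth/cloud-platform',);
# with older versions it must pass the cloud-platform scope through scopes
# directly, which is exactly the difference between the two
# assert_called_once_with signatures in these tests.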
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - WorkflowTemplateServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.WorkflowTemplateServiceGrpcTransport, - transports.WorkflowTemplateServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_workflow_template_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.WorkflowTemplateServiceGrpcTransport, - transports.WorkflowTemplateServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_workflow_template_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.WorkflowTemplateServiceGrpcTransport, grpc_helpers), - (transports.WorkflowTemplateServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_workflow_template_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "dataproc.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="dataproc.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) -def test_workflow_template_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
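# The client_cert_source_callback referenced below is a module-level helper in
# this test file; it behaves like the following sketch, returning a
# (certificate_chain, private_key) pair of dummy bytes that the transport
# passes to grpc.ssl_channel_credentials when configuring mTLS.
def client_cert_source_callback():
    return b"cert bytes", b"key bytes"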
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_workflow_template_service_host_no_port(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com'), - ) - assert client.transport._host == 'dataproc.googleapis.com:443' - - -def test_workflow_template_service_host_with_port(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='dataproc.googleapis.com:8000'), - ) - assert client.transport._host == 'dataproc.googleapis.com:8000' - -def test_workflow_template_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.WorkflowTemplateServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_workflow_template_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) -def test_workflow_template_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.WorkflowTemplateServiceGrpcTransport, transports.WorkflowTemplateServiceGrpcAsyncIOTransport]) -def test_workflow_template_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_workflow_template_service_grpc_lro_client(): - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -def test_workflow_template_service_grpc_lro_async_client(): - client = WorkflowTemplateServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_cluster_path(): - project = "squid" - location = "clam" - cluster = "whelk" - expected = "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) - actual = WorkflowTemplateServiceClient.cluster_path(project, location, cluster) - assert expected == actual - - -def test_parse_cluster_path(): - expected = { - "project": "octopus", - "location": "oyster", - "cluster": "nudibranch", - } - path = WorkflowTemplateServiceClient.cluster_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_cluster_path(path) - assert expected == actual - -def test_workflow_template_path(): - project = "cuttlefish" - region = "mussel" - workflow_template = "winkle" - expected = "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format(project=project, region=region, workflow_template=workflow_template, ) - actual = WorkflowTemplateServiceClient.workflow_template_path(project, region, workflow_template) - assert expected == actual - - -def test_parse_workflow_template_path(): - expected = { - "project": "nautilus", - "region": "scallop", - "workflow_template": "abalone", - } - path = WorkflowTemplateServiceClient.workflow_template_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_workflow_template_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = WorkflowTemplateServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = WorkflowTemplateServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = WorkflowTemplateServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = WorkflowTemplateServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = WorkflowTemplateServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = WorkflowTemplateServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = WorkflowTemplateServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = WorkflowTemplateServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = WorkflowTemplateServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = WorkflowTemplateServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = WorkflowTemplateServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = WorkflowTemplateServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.WorkflowTemplateServiceTransport, '_prep_wrapped_messages') as prep: - client = WorkflowTemplateServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.WorkflowTemplateServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = WorkflowTemplateServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py index 0ea8b97e..58ced7d8 100644 --- a/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py +++ b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py @@ -135,7 +135,25 @@ def test_autoscaling_policy_service_client_service_account_always_use_jwt(client ) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) - use_jwt.assert_called_with(True) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), + (transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_autoscaling_policy_service_client_service_account_always_use_jwt_true( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, 
"with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) @pytest.mark.parametrize( @@ -2145,7 +2163,7 @@ def test_autoscaling_policy_service_grpc_transport_client_cert_source_for_mtls( "squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ @@ -2254,7 +2272,7 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_sour "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ @@ -2301,7 +2319,7 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_adc(transport_cl "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ diff --git a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py index 55099c5c..c070bea9 100644 --- a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py +++ b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py @@ -139,7 +139,25 @@ def test_cluster_controller_client_service_account_always_use_jwt(client_class): ) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) - use_jwt.assert_called_with(True) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.ClusterControllerGrpcTransport, "grpc"), + (transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_cluster_controller_client_service_account_always_use_jwt_true( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) @pytest.mark.parametrize( @@ -2077,7 +2095,7 @@ def test_cluster_controller_grpc_transport_client_cert_source_for_mtls(transport "squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ @@ -2186,7 +2204,7 @@ def test_cluster_controller_transport_channel_mtls_with_client_cert_source( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ @@ -2233,7 +2251,7 @@ def test_cluster_controller_transport_channel_mtls_with_adc(transport_class): "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ diff --git a/tests/unit/gapic/dataproc_v1/test_job_controller.py b/tests/unit/gapic/dataproc_v1/test_job_controller.py index 17d6d182..c0917e68 100644 --- a/tests/unit/gapic/dataproc_v1/test_job_controller.py +++ 
b/tests/unit/gapic/dataproc_v1/test_job_controller.py @@ -133,7 +133,25 @@ def test_job_controller_client_service_account_always_use_jwt(client_class): ) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) - use_jwt.assert_called_with(True) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.JobControllerGrpcTransport, "grpc"), + (transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_job_controller_client_service_account_always_use_jwt_true( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) @pytest.mark.parametrize( @@ -1995,7 +2013,7 @@ def test_job_controller_grpc_transport_client_cert_source_for_mtls(transport_cla "squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ @@ -2102,7 +2120,7 @@ def test_job_controller_transport_channel_mtls_with_client_cert_source(transport "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ @@ -2149,7 +2167,7 @@ def test_job_controller_transport_channel_mtls_with_adc(transport_class): "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ diff --git a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py index 7d81d031..30fd4bd1 100644 --- a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py +++ b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py @@ -141,7 +141,25 @@ def test_workflow_template_service_client_service_account_always_use_jwt(client_ ) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) - use_jwt.assert_called_with(True) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.WorkflowTemplateServiceGrpcTransport, "grpc"), + (transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_workflow_template_service_client_service_account_always_use_jwt_true( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) @pytest.mark.parametrize( @@ -2612,7 +2630,7 @@ def test_workflow_template_service_grpc_transport_client_cert_source_for_mtls( "squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ @@ -2721,7 +2739,7 @@ def test_workflow_template_service_transport_channel_mtls_with_client_cert_sourc 
"mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ @@ -2768,7 +2786,7 @@ def test_workflow_template_service_transport_channel_mtls_with_adc(transport_cla "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ diff --git a/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py b/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py index 20937354..c01f0f1e 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py @@ -135,7 +135,25 @@ def test_autoscaling_policy_service_client_service_account_always_use_jwt(client ) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) - use_jwt.assert_called_with(True) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.AutoscalingPolicyServiceGrpcTransport, "grpc"), + (transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_autoscaling_policy_service_client_service_account_always_use_jwt_true( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) @pytest.mark.parametrize( @@ -2145,7 +2163,7 @@ def test_autoscaling_policy_service_grpc_transport_client_cert_source_for_mtls( "squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ @@ -2254,7 +2272,7 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_sour "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ @@ -2301,7 +2319,7 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_adc(transport_cl "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ diff --git a/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py index 7b6c508d..8414c5ce 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py @@ -141,7 +141,25 @@ def test_cluster_controller_client_service_account_always_use_jwt(client_class): ) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) - use_jwt.assert_called_with(True) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.ClusterControllerGrpcTransport, "grpc"), + (transports.ClusterControllerGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def 
test_cluster_controller_client_service_account_always_use_jwt_true( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) @pytest.mark.parametrize( @@ -1919,7 +1937,7 @@ def test_cluster_controller_grpc_transport_client_cert_source_for_mtls(transport "squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ @@ -2028,7 +2046,7 @@ def test_cluster_controller_transport_channel_mtls_with_client_cert_source( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ @@ -2075,7 +2093,7 @@ def test_cluster_controller_transport_channel_mtls_with_adc(transport_class): "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ diff --git a/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py b/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py index 31dc26ba..ba9cf694 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py @@ -135,7 +135,25 @@ def test_job_controller_client_service_account_always_use_jwt(client_class): ) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) - use_jwt.assert_called_with(True) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.JobControllerGrpcTransport, "grpc"), + (transports.JobControllerGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_job_controller_client_service_account_always_use_jwt_true( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) @pytest.mark.parametrize( @@ -2013,7 +2031,7 @@ def test_job_controller_grpc_transport_client_cert_source_for_mtls(transport_cla "squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ @@ -2120,7 +2138,7 @@ def test_job_controller_transport_channel_mtls_with_client_cert_source(transport "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ @@ -2167,7 +2185,7 @@ def test_job_controller_transport_channel_mtls_with_adc(transport_class): "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ diff --git 
a/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py b/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py index d7fb9c63..813d2bdf 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py @@ -141,7 +141,25 @@ def test_workflow_template_service_client_service_account_always_use_jwt(client_ ) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) - use_jwt.assert_called_with(True) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.WorkflowTemplateServiceGrpcTransport, "grpc"), + (transports.WorkflowTemplateServiceGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_workflow_template_service_client_service_account_always_use_jwt_true( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) @pytest.mark.parametrize( @@ -2612,7 +2630,7 @@ def test_workflow_template_service_grpc_transport_client_cert_source_for_mtls( "squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ @@ -2721,7 +2739,7 @@ def test_workflow_template_service_transport_channel_mtls_with_client_cert_sourc "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ @@ -2768,7 +2786,7 @@ def test_workflow_template_service_transport_channel_mtls_with_adc(transport_cla "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[
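The recurring pattern in the hunks above is the always_use_jwt_access fix: the client-level tests now expect with_always_use_jwt_access to be left alone by default (use_jwt.assert_not_called()), while the newly added transport-level tests opt in explicitly and expect exactly one call with True. The snippet below is a minimal, self-contained restatement of that contract for one of the affected services, assuming the generated package's usual layout and imports (google.oauth2.service_account plus the dataproc_v1 workflow_template_service client and transports modules); it is a sketch for orientation, not part of the patch itself.

    # Sketch of the behavior the updated tests assert, for one service.
    from unittest import mock

    import pytest
    from google.oauth2 import service_account

    from google.cloud.dataproc_v1.services.workflow_template_service import (
        WorkflowTemplateServiceClient,
        transports,
    )


    def test_client_leaves_jwt_access_disabled_by_default():
        # Mirrors the updated client-level assertion: constructing the client
        # with service-account credentials must NOT call
        # with_always_use_jwt_access (previously it asserted a call with True).
        with mock.patch.object(
            service_account.Credentials, "with_always_use_jwt_access", create=True
        ) as use_jwt:
            creds = service_account.Credentials(None, None, None)
            WorkflowTemplateServiceClient(credentials=creds)
            use_jwt.assert_not_called()


    @pytest.mark.parametrize(
        "transport_class",
        [
            transports.WorkflowTemplateServiceGrpcTransport,
            transports.WorkflowTemplateServiceGrpcAsyncIOTransport,
        ],
    )
    def test_transport_enables_jwt_access_when_requested(transport_class):
        # Mirrors the new transport-level test: opting in via
        # always_use_jwt_access=True must call with_always_use_jwt_access
        # exactly once, with True.
        with mock.patch.object(
            service_account.Credentials, "with_always_use_jwt_access", create=True
        ) as use_jwt:
            creds = service_account.Credentials(None, None, None)
            transport_class(credentials=creds, always_use_jwt_access=True)
            use_jwt.assert_called_once_with(True)

The other repeated hunk, scopes=("https://www.googleapis.com/auth/cloud-platform",) becoming scopes=None in the mTLS channel tests, appears to follow the same user-scope/default-scope split visible in the create_channel expectation earlier in this file: the cloud-platform scope travels in default_scopes, and only user-supplied scopes are forwarded as scopes.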